diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -99,7 +99,7 @@
     Agnostic,
   };
   PolicyType TailPolicy = Agnostic;
-  PolicyType MaskPolicy = Undisturbed;
+  PolicyType MaskPolicy = Agnostic;
   bool HasTailPolicy, HasMaskPolicy;
   Policy(bool HasTailPolicy, bool HasMaskPolicy)
       : IsUnspecified(true), HasTailPolicy(HasTailPolicy),
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -1020,16 +1020,11 @@
   PolicyAttrs.IsUnspecified = false;
   if (IsMasked) {
     Name += "_m";
-    // FIXME: Currently _m default policy implementation is different with
-    // RVV intrinsic spec (TUMA)
-    PolicyAttrs.TailPolicy = Policy::PolicyType::Undisturbed;
-    PolicyAttrs.MaskPolicy = Policy::PolicyType::Undisturbed;
    if (HasPolicy)
-      BuiltinName += "_tumu";
+      BuiltinName += "_tama";
    else
      BuiltinName += "_m";
  } else {
-    PolicyAttrs.TailPolicy = Policy::PolicyType::Agnostic;
    if (HasPolicy)
      BuiltinName += "_ta";
  }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c
@@ -404,397 +404,397 @@
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vaadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vaadd_vv_i8mf8_m(mask, op1, op2, vl);
 }
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vaadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8mf8_m(mask, op1, op2, vl);
 }
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t
test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vaadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vaadd_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vaadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vaadd_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vaadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vaadd_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vaadd_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vaadd_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vaadd_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vaadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vaadd_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vaadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vaadd_vv_i16mf2_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vaadd_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vaadd_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - 
return vaadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vaadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vaadd_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vaadd_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vaadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vaadd_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vaadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vaadd_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vaadd_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vaadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vaadd_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return 
vaadd_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vaadd_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vaadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vaadd_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, 
vint64m4_t op2, size_t vl) { - return vaadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vaadd_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vaadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vaadd_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vaaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, 
vuint8mf8_t op2, size_t vl) { + return vaaddu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vaaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vaaddu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vaaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vaaddu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t 
test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vaaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vaaddu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vaaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vaaddu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vaaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vaaddu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vaaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vaaddu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vaaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vaaddu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vaaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vaaddu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vaaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vaaddu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, 
uint16_t op2, size_t vl) { - return vaaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vaaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vaaddu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vaaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vaaddu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vaaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vaaddu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vaaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vaaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vaaddu_vv_u32m1_m(mask, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vaaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vaaddu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vaaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vaaddu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, 
vuint32m4_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vaaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vaaddu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vaaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vaaddu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vaaddu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vaaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vaaddu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vaaddu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vaaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vaaddu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vaaddu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vaaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vaaddu_vv_u64m8_m(mask, op1, op2, 
vl);
}
// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vaaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c
@@ -800,793 +800,793 @@
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vadd_vv_i8mf8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vadd_vx_i8mf8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vadd_vv_i8mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vadd_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vadd_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vadd_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vadd_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vadd_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vadd_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vadd_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vadd_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf2_m(mask, maskedoff, op1, op2, 
vl); +vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vadd_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vadd_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t 
test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vadd_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vadd_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vadd_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vadd_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vadd_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vadd_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vadd_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vadd_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vadd_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return 
vadd_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vadd_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vadd_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return 
vadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vadd_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vadd_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vadd_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vadd_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vadd_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vadd_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vadd_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vadd_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vadd_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vadd_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vadd_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vadd_vv_u16mf2_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vadd_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vadd_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - 
return vadd_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vadd_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vadd_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vadd_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vadd_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vadd_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vadd_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vadd_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vadd_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vadd_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return 
vadd_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vadd_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vadd_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vadd_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vadd_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vadd_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, 
vuint64m4_t op2, size_t vl) { - return vadd_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vadd_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vadd_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vadd_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vadd_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vand_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) 
{ + return vand_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vand_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vand_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vand_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf2_m(mask, 
maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vand_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vand_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vand_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, 
vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vand_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vand_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vand_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vand_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vand_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vand_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vand_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vand_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vand_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vand_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vand_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vand_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vand_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t 
test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vand_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vand_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t 
test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vand_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vand_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vand_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vand_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vand_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vand_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vand_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vand_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + 
return vand_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vand_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vand_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m2_m(mask, maskedoff, op1, 
op2, vl); +vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vand_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vand_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, 
vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vand_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vand_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vand_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vand_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vand_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vand_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vand_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand_vv_u32m1_m(mask, maskedoff, op1, op2, vl); 
+vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vand_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vand_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vand_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vand_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vand_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vand_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vand_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vand_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vasub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vasub_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vasub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vasub_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vasub_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vasub_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vasub_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vasub_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vasub_vv_i8m8_m(mask, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vasub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vasub_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vasub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vasub_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { 
- return vasub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vasub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vasub_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vasub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vasub_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vasub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vasub_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vasub_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vasub_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vasub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vasub_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return 
vasub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vasub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vasub_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vasub_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, 
vint64m1_t op2, size_t vl) { - return vasub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vasub_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vasub_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vasub_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( poison, 
[[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vasub_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vasubu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, 
vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vasubu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vasubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vasubu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vasubu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vasubu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vasubu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vasubu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vasubu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); 
+vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vasubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vasubu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vasubu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( poison, 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vasubu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vasubu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vasubu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vasubu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return 
vasubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vasubu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vasubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vasubu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vasubu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vasubu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vasubu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vasubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vasubu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vasubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vasubu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t 
op2, size_t vl) { - return vasubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vdiv_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vdiv_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vdiv_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf4_m(mask, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vdiv_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vdiv_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, 
vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vdiv_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vdiv_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vdiv_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vdiv_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m8_m(mask, 
maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vdiv_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vdiv_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vdiv_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vdiv_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vdiv_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vdiv_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vdiv_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vdiv_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vdiv_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vdiv_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, 
vint32m4_t op2, size_t vl) { + return vdiv_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vdiv_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vdiv_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, 
int64_t op2, size_t vl) { - return vdiv_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vdiv_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vdiv_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vdiv_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vdivu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - 
return vdivu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vdivu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vdivu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vdivu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vdivu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vdivu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vdivu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vdivu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vdivu_vv_u16mf2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vdivu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vdivu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t 
op2, size_t vl) { - return vdivu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vdivu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vdivu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vdivu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vdivu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vdivu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdivu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vdivu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vdivu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return 
vdivu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vdivu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vdivu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vdivu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vdivu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, 
vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfabs_v_f16mf4_m(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfabs_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfabs_v_f16mf2_m(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfabs_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfabs_v_f16m1_m(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfabs_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfabs_v_f16m2_m(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfabs_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfabs_v_f16m4_m(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfabs_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t 
test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfabs_v_f16m8_m(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfabs_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfabs_v_f32mf2_m(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfabs_v_f32m1_m(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfabs_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfabs_v_f32m2_m(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfabs_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfabs_v_f32m4_m(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfabs_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t 
test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfabs_v_f32m8_m(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfabs_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfabs_v_f64m1_m(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfabs_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfabs_v_f64m2_m(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfabs_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfabs_v_f64m4_m(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfabs_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfabs_v_f64m8_m(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfabs_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfadd_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfadd_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfadd_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfadd_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, 
vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfadd_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfadd_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfadd_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfadd_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfadd_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfadd_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfadd_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfadd_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return 
vfadd_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfadd_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfadd_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfadd_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfadd_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t 
mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfadd_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfadd_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfadd_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfadd_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfadd_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfadd_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfadd_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfadd_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfadd_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return 
vfadd_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfadd_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfadd_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfclass_v_u16mf4_m(mask, maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfclass_v_u16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfclass_v_u16mf2_m(mask, maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfclass_v_u16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfclass_v_u16m1_m(mask, maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfclass_v_u16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfclass_v_u16m2_m(mask, maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfclass_v_u16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfclass_v_u16m4_m(mask, maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfclass_v_u16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfclass_v_u16m8_m(mask, maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfclass_v_u16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfclass_v_u32mf2_m(mask, maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfclass_v_u32m1_m(mask, maskedoff, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfclass_v_u32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfclass_v_u32m2_m(mask, maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfclass_v_u32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfclass_v_u32m4_m(mask, maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfclass_v_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfclass_v_u32m8_m(mask, maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfclass_v_u32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfclass_v_u64m1_m(mask, maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfclass_v_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t 
op1, size_t vl) { - return vfclass_v_u64m2_m(mask, maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfclass_v_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfclass_v_u64m4_m(mask, maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfclass_v_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfclass_v_u64m8_m(mask, maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfclass_v_u64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c @@ -819,811 +819,811 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_x_f_v_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_x_f_v_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_x_f_v_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_x_f_v_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_x_f_v_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_x_f_v_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_x_f_v_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_x_f_v_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_rtz_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_x_f_v_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_x_f_v_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_x_f_v_i16m8_m(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_x_f_v_i16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m8_m(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, 
vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_xu_f_v_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, 
vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_xu_f_v_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_xu_f_v_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_xu_f_v_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_xu_f_v_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_xu_f_v_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_xu_f_v_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_xu_f_v_u16m8_m(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_xu_f_v_u16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m8_m(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vfcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vint16mf4_t src, size_t vl) { + return vfcvt_f_x_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vfcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vint16mf2_t src, size_t vl) { + return vfcvt_f_x_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vfcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl) { + return vfcvt_f_x_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vfcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vint16m2_t src, size_t vl) { + return vfcvt_f_x_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vfcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl) { + return vfcvt_f_x_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vfcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl) { + return vfcvt_f_x_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { + return vfcvt_f_xu_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t vl) { + return vfcvt_f_xu_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint16m2_t src, size_t vl) { + return vfcvt_f_xu_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl) { + return vfcvt_f_xu_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vfcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl) { + return vfcvt_f_xu_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_f_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_x_f_v_i32m2_m(mask, src, 
vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl); +vint32m8_t 
test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t 
test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vfcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfcvt_f_x_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vfcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vfcvt_f_x_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vfcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vfcvt_f_x_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vfcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vfcvt_f_x_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vfcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl) { + return vfcvt_f_x_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return vfcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vfcvt_f_xu_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vfcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vfcvt_f_xu_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vfcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vfcvt_f_xu_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vfcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl) { + return vfcvt_f_xu_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_x_f_v_i64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t 
vl) { + return vfcvt_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - 
return vfcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_xu_f_v_u64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vfcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl) { + return vfcvt_f_x_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vfcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, size_t vl) { + return vfcvt_f_x_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t 
test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vfcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl) { + return vfcvt_f_x_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vfcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vint64m8_t src, size_t vl) { + return vfcvt_f_x_v_f64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vfcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t vl) { + return vfcvt_f_xu_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return vfcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t vl) { + return vfcvt_f_xu_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vfcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t vl) { + return vfcvt_f_xu_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vfcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint64m8_t src, size_t vl) { + return vfcvt_f_xu_v_f64m8_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfdiv_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfdiv_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfdiv_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfdiv_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, 
vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfdiv_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfdiv_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfdiv_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfdiv_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfdiv_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfdiv_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfdiv_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfdiv_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfdiv_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return 
vfdiv_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfdiv_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfdiv_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfdiv_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfdiv_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfdiv_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t 
test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfdiv_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfdiv_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfdiv_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfdiv_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfdiv_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfdiv_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfdiv_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfdiv_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfdiv_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return 
vfdiv_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfdiv_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfdiv_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfdiv_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t 
vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c @@ -9,7 +9,7 @@ 
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t 
vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t vd, float 
rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vfmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, 
vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmax_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmax_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmax_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmax_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmax_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmax_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmax_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmax_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmax_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmax_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmax_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmax_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmax_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmax_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmax_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmax_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmax_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmax_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmax_vv_f32mf2_m(mask, maskedoff, op1, 
op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmax_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmax_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmax_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmax_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfmax_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmax_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmax_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], 
float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmax_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfmax_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmax_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfmax_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmax_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfmax_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmax_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfmax_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmax_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfmax_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmax_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfmax_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmax_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfmax_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmax_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfmax_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmax_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfmax_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmax_vv_f64m4_m(mask, maskedoff, op1, op2, vl); 
+vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfmax_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmax_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfmax_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmax_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfmax_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmax_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfmax_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { 
+ return vfmin_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmin_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmin_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmin_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmin_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmin_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmin_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmin_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmin_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmin_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmin_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t 
test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmin_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmin_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfmin_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmin_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmin_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmin_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfmin_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmin_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfmin_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmin_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfmin_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmin_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfmin_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmin_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfmin_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmin_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfmin_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmin_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfmin_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmin_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfmin_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmin_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfmin_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmin_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfmin_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmin_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfmin_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmin_vv_f64m8_m(mask, maskedoff, op1, op2, vl); 
+vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfmin_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmin_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfmin_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t 
vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, 
vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, 
vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, 
vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmul_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmul_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmul_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmul_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmul_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + 
return vfmul_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmul_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmul_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfmul_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmul_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfmul_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfmul_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfmul_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfmul_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfmul_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfmul_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t 
vl) { + return vfmul_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfmul_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfmul_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfmul_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfmul_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t 
test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfmul_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfmul_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c @@ -873,865 +873,865 @@ // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_x_f_w_i8mf8_m(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfncvt_x_f_w_i8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf8_m(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_x_f_w_i8mf4_m(mask, maskedoff, src, vl); +vint8mf4_t 
test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_x_f_w_i8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf4_m(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_x_f_w_i8mf2_m(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_x_f_w_i8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf2_m(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_x_f_w_i8m1_m(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_x_f_w_i8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, 
vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m1_m(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_x_f_w_i8m2_m(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_x_f_w_i8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m2_m(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_x_f_w_i8m4_m(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_x_f_w_i8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m4_m(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf8_m(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfncvt_xu_f_w_u8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf8_m(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf4_m(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf4_m(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf2_m(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_xu_f_w_u8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf2_m(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_xu_f_w_u8m1_m(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_xu_f_w_u8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m1_m(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_xu_f_w_u8m2_m(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_xu_f_w_u8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m2_m(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_xu_f_w_u8m4_m(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_xu_f_w_u8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m4_m(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_x_f_w_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_f_w_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_x_f_w_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return 
vfncvt_x_f_w_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_x_f_w_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_x_f_w_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return 
vfncvt_rtz_x_f_w_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_x_f_w_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_xu_f_w_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_xu_f_w_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_xu_f_w_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vfncvt_f_x_w_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfncvt_f_x_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vfncvt_f_x_w_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vfncvt_f_x_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_f_x_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vfncvt_f_x_w_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vfncvt_f_x_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vfncvt_f_x_w_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vfncvt_f_x_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vfncvt_f_x_w_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { + return vfncvt_f_x_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, 
vuint32m1_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vfncvt_f_xu_w_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vfncvt_f_xu_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vfncvt_f_xu_w_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vfncvt_f_xu_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vfncvt_f_xu_w_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) { + return vfncvt_f_xu_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_f_f_w_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_f_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, 
size_t vl) { - return vfncvt_rod_f_f_w_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_f_f_w_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_f_f_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_f_f_w_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_f_f_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_f_f_w_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_f_f_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_f_f_w_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_f_f_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_x_f_w_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_x_f_w_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_x_f_w_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_x_f_w_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_x_f_w_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_xu_f_w_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_xu_f_w_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, 
vfloat64m2_t src, size_t vl) { + return vfncvt_xu_f_w_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_xu_f_w_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_xu_f_w_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, 
vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vfncvt_f_x_w_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) { + return vfncvt_f_x_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { + return vfncvt_f_x_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { + return vfncvt_f_x_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) { + return vfncvt_f_x_w_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vfncvt_f_xu_w_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) { + return vfncvt_f_xu_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) { + return vfncvt_f_xu_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) { + return vfncvt_f_xu_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) { + return vfncvt_f_xu_w_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_f_f_w_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_f_f_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_rod_f_f_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_f_f_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_f_f_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_rod_f_f_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_f_f_w_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_rod_f_f_w_f32m4_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfneg_v_f16mf4_m(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfneg_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfneg_v_f16mf2_m(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfneg_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], 
[[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfneg_v_f16m1_m(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfneg_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfneg_v_f16m2_m(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfneg_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfneg_v_f16m4_m(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfneg_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfneg_v_f16m8_m(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfneg_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfneg_v_f32mf2_m(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfneg_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( poison, 
[[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfneg_v_f32m1_m(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfneg_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfneg_v_f32m2_m(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfneg_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfneg_v_f32m4_m(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfneg_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfneg_v_f32m8_m(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfneg_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfneg_v_f64m1_m(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfneg_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( poison, 
[[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfneg_v_f64m2_m(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfneg_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfneg_v_f64m4_m(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfneg_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfneg_v_f64m8_m(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfneg_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // 
CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t vd, double rs1, 
vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t 
vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: 
@test_vfnmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: 
@test_vfnmsac_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t 
vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfnmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, 
vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, 
vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t 
test_vfnmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( 
[[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 
+450,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrdiv.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrdiv_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrdiv_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrdiv_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrdiv_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrdiv_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfrdiv_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m4_m(mask, maskedoff, op1, op2, 
vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfrdiv_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfrdiv_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfrdiv_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], 
double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfrdiv_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrec7_v_f16mf4_m(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfrec7_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrec7_v_f16mf2_m(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfrec7_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrec7_v_f16m1_m(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfrec7_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrec7_v_f16m2_m(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfrec7_v_f16m2_m(mask, op1, vl); } 
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrec7_v_f16m4_m(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfrec7_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrec7_v_f16m8_m(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfrec7_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrec7_v_f32mf2_m(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfrec7_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrec7_v_f32m1_m(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfrec7_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrec7_v_f32m2_m(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfrec7_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrec7_v_f32m4_m(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfrec7_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrec7_v_f32m8_m(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfrec7_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrec7_v_f64m1_m(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfrec7_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrec7_v_f64m2_m(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfrec7_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrec7_v_f64m4_m(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfrec7_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( poison, 
[[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrec7_v_f64m8_m(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfrec7_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m1_f64m1_m(mask, maskedoff, 
vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return 
vfredmin_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return 
vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, 
vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t 
test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7_v_f16mf4_m(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfrsqrt7_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7_v_f16mf2_m(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfrsqrt7_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrsqrt7_v_f16m1_m(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfrsqrt7_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrsqrt7_v_f16m2_m(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfrsqrt7_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrsqrt7_v_f16m4_m(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfrsqrt7_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrsqrt7_v_f16m8_m(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfrsqrt7_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrsqrt7_v_f32mf2_m(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrsqrt7_v_f32m1_m(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfrsqrt7_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrsqrt7_v_f32m2_m(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfrsqrt7_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrsqrt7_v_f32m4_m(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfrsqrt7_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrsqrt7_v_f32m8_m(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfrsqrt7_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrsqrt7_v_f64m1_m(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfrsqrt7_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrsqrt7_v_f64m2_m(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfrsqrt7_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrsqrt7_v_f64m4_m(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfrsqrt7_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrsqrt7_v_f64m8_m(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfrsqrt7_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: 
@test_vfrsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, 
vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfrsub_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfrsub_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfrsub_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfrsub_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfrsub_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfrsub_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return 
vfrsub_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfrsub_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnj_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnj_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnj_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnj_vv_f16mf2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnj_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnj_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnj_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnj_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t 
test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnj_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnj_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnj_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnj_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnj_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnj_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnj_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnj_vv_f32m2_m(mask, maskedoff, op1, op2, vl); 
+vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnj_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnj_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnj_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnj_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsgnj_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( poison, 
[[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnj_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsgnj_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfsgnj_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnj_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsgnj_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfsgnj_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnj_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsgnj_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfsgnj_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnj_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsgnj_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfsgnj_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjn_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjn_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjn_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjn_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, 
vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjn_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjn_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjn_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjn_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjn_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjn_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjn_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); 
+vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjn_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnjn_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjn_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnjn_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjn_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnjn_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjn_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsgnjn_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjn_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return 
vfsgnjn_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfsgnjn_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjn_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsgnjn_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfsgnjn_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjn_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsgnjn_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfsgnjn_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjn_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsgnjn_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfsgnjn_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjx_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjx_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjx_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjx_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjx_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjx_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t 
op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjx_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjx_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjx_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnjx_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjx_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnjx_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjx_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnjx_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t 
test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjx_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsgnjx_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjx_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsgnjx_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfsgnjx_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjx_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsgnjx_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfsgnjx_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjx_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsgnjx_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfsgnjx_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjx_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return 
vfsgnjx_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfsgnjx_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf4_m(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf2_m(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m1_m(mask, maskedoff, src, value, 
vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m2_m(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m4_m(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m8_m(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1down_vf_f32mf2_m(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_vf_f32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1down_vf_f32m1_m(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float value, size_t vl) { + return vfslide1down_vf_f32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1down_vf_f32m2_m(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float value, size_t vl) { + return vfslide1down_vf_f32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1down_vf_f32m4_m(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float value, size_t vl) { + return vfslide1down_vf_f32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1down_vf_f32m8_m(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float value, size_t vl) { + return vfslide1down_vf_f32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1down_vf_f64m1_m(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double value, size_t vl) { + return vfslide1down_vf_f64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1down_vf_f64m2_m(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double value, size_t vl) { + return vfslide1down_vf_f64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1down_vf_f64m4_m(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, double value, size_t vl) { + return vfslide1down_vf_f64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1down_vf_f64m8_m(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, double value, size_t vl) { + return vfslide1down_vf_f64m8_m(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf4_m(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf2_m(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m1_m(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m2_m(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 
value, size_t vl) { - return vfslide1up_vf_f16m4_m(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m8_m(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1up_vf_f32mf2_m(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_vf_f32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1up_vf_f32m1_m(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float value, size_t vl) { + return vfslide1up_vf_f32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1up_vf_f32m2_m(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float value, size_t vl) { + return vfslide1up_vf_f32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1up_vf_f32m4_m(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float value, size_t vl) { + return vfslide1up_vf_f32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1up_vf_f32m8_m(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float value, size_t vl) { + return vfslide1up_vf_f32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1up_vf_f64m1_m(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double value, size_t vl) { + return vfslide1up_vf_f64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1up_vf_f64m2_m(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double value, size_t vl) { + return vfslide1up_vf_f64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1up_vf_f64m4_m(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, double value, size_t vl) { + return vfslide1up_vf_f64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1up_vf_f64m8_m(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, double value, size_t vl) { + return vfslide1up_vf_f64m8_m(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfsqrt_v_f16mf4_m(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfsqrt_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfsqrt_v_f16mf2_m(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfsqrt_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfsqrt_v_f16m1_m(mask, maskedoff, op1, vl); +vfloat16m1_t 
test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfsqrt_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfsqrt_v_f16m2_m(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfsqrt_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfsqrt_v_f16m4_m(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfsqrt_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfsqrt_v_f16m8_m(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfsqrt_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfsqrt_v_f32mf2_m(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfsqrt_v_f32m1_m(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfsqrt_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: 
@test_vfsqrt_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfsqrt_v_f32m2_m(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfsqrt_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfsqrt_v_f32m4_m(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfsqrt_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfsqrt_v_f32m8_m(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfsqrt_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfsqrt_v_f64m1_m(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfsqrt_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfsqrt_v_f64m2_m(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfsqrt_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfsqrt_v_f64m4_m(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfsqrt_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfsqrt_v_f64m8_m(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfsqrt_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsub_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsub_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsub_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsub_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsub_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsub_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsub_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsub_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsub_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsub_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsub_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsub_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsub_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsub_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsub_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsub_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsub_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t 
mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsub_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsub_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfsub_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsub_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfsub_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsub_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfsub_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsub_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfsub_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsub_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfsub_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsub_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t 
test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfsub_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsub_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfsub_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsub_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfsub_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd.c 
@@ -333,325 +333,325 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_wv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_wv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_wv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_vv_f32m2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_wv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_wv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_wv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_wv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_wv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, 
vfloat32mf2_t op2, size_t vl) { - return vfwadd_wv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfwadd_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_wv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { + return vfwadd_wv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwadd_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_wv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return vfwadd_wv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t 
test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwadd_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_wv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return vfwadd_wv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c @@ -684,676 +684,676 @@ // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) { + return vfwcvt_f_x_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) { + return vfwcvt_f_x_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vfwcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { + return vfwcvt_f_x_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return 
vfwcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { + return vfwcvt_f_x_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vfwcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { + return vfwcvt_f_x_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) { + return vfwcvt_f_xu_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) { + return vfwcvt_f_xu_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t 
test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfwcvt_x_f_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_x_f_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfwcvt_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl); +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfwcvt_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + 
return vfwcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, 
size_t vl) { - return vfwcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl); +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) { + return vfwcvt_f_x_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vfwcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { + return vfwcvt_f_x_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { + return vfwcvt_f_x_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vfwcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { + return vfwcvt_f_x_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { + return vfwcvt_f_xu_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { + return vfwcvt_f_xu_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { + return vfwcvt_f_xu_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) { + return vfwcvt_f_xu_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_f_f_v_f32mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfwcvt_f_f_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m1_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_f_f_v_f32m2_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_f_f_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m4_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfwcvt_f_f_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_f_f_v_f32m8_m(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_f_f_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl); +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl); +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfwcvt_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl); +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + 
return vfwcvt_rtz_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_x_f_v_i64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl); +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl); +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return 
vfwcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vfwcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vfwcvt_f_x_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vfwcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vfwcvt_f_x_v_f64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m1_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_f_f_v_f64m2_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_f_f_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfwcvt_f_f_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_f_f_v_f64m8_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_f_f_v_f64m8_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vfwmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vfwmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, 
vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: 
@test_vfwmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: 
@test_vfwmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul.c @@ -171,163 +171,163 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwmul_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwmul_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwmul_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwmul_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t 
test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwmul_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfwmul_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwmul_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, 
vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwmul_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t vd, 
vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 
+117,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c @@ -108,100 +108,100 @@ // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf4_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32mf2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c @@ -108,100 +108,100 @@ // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf4_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t 
test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return 
vfwredusum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32mf2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub.c @@ -333,325 +333,325 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t 
vl) { + return vfwsub_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_wv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_wv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwsub_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwsub_wv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_wv_f32m2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_wv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_wv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_wv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_wv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, 
vfloat32m1_t op2, size_t vl) { - return vfwsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfwsub_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_wv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { + return vfwsub_wv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwsub_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_wv_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return vfwsub_wv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwsub_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t 
test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_wv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return vfwsub_wv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vid.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vid.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vid.c @@ -206,199 +206,199 @@ // CHECK-RV64-LABEL: @test_vid_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return vid_v_u8mf8_m(mask, maskedoff, vl); +vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, size_t vl) { + return vid_v_u8mf8_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return vid_v_u8mf4_m(mask, maskedoff, vl); +vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, size_t vl) { + return vid_v_u8mf4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i8.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return vid_v_u8mf2_m(mask, maskedoff, vl); +vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, size_t vl) { + return vid_v_u8mf2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return vid_v_u8m1_m(mask, maskedoff, vl); +vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, size_t vl) { + return vid_v_u8m1_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return vid_v_u8m2_m(mask, maskedoff, vl); +vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, size_t vl) { + return vid_v_u8m2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return vid_v_u8m4_m(mask, maskedoff, vl); +vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, size_t vl) { + return vid_v_u8m4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return vid_v_u8m8_m(mask, maskedoff, vl); +vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, size_t vl) { + return vid_v_u8m8_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return vid_v_u16mf4_m(mask, maskedoff, vl); +vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, size_t vl) { + return vid_v_u16mf4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t 
maskedoff, size_t vl) { - return vid_v_u16mf2_m(mask, maskedoff, vl); +vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, size_t vl) { + return vid_v_u16mf2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return vid_v_u16m1_m(mask, maskedoff, vl); +vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, size_t vl) { + return vid_v_u16m1_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return vid_v_u16m2_m(mask, maskedoff, vl); +vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, size_t vl) { + return vid_v_u16m2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i16.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return vid_v_u16m4_m(mask, maskedoff, vl); +vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, size_t vl) { + return vid_v_u16m4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return vid_v_u16m8_m(mask, maskedoff, vl); +vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, size_t vl) { + return vid_v_u16m8_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return vid_v_u32mf2_m(mask, maskedoff, vl); +vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, size_t vl) { + return vid_v_u32mf2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i32.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return vid_v_u32m1_m(mask, 
maskedoff, vl); +vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, size_t vl) { + return vid_v_u32m1_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return vid_v_u32m2_m(mask, maskedoff, vl); +vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, size_t vl) { + return vid_v_u32m2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return vid_v_u32m4_m(mask, maskedoff, vl); +vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, size_t vl) { + return vid_v_u32m4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return vid_v_u32m8_m(mask, maskedoff, vl); +vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, size_t vl) { + return vid_v_u32m8_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return vid_v_u64m1_m(mask, maskedoff, vl); +vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, size_t vl) { + return vid_v_u64m1_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return vid_v_u64m2_m(mask, maskedoff, vl); +vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, size_t vl) { + return vid_v_u64m2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return vid_v_u64m4_m(mask, maskedoff, vl); +vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, 
size_t vl) { + return vid_v_u64m4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i64( poison, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return vid_v_u64m8_m(mask, maskedoff, vl); +vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, size_t vl) { + return vid_v_u64m8_m(mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/viota.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/viota.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/viota.c @@ -206,199 +206,199 @@ // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u8mf8_m(mask, maskedoff, op1, vl); +vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return viota_m_u8mf8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u8mf4_m(mask, maskedoff, op1, vl); +vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return viota_m_u8mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u8mf2_m(mask, maskedoff, op1, vl); +vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return viota_m_u8mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u8m1_m(mask, maskedoff, op1, vl); 
+vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return viota_m_u8m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u8m2_m(mask, maskedoff, op1, vl); +vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return viota_m_u8m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u8m4_m(mask, maskedoff, op1, vl); +vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vbool2_t op1, size_t vl) { + return viota_m_u8m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return viota_m_u8m8_m(mask, maskedoff, op1, vl); +vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vbool1_t op1, size_t vl) { + return viota_m_u8m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u16mf4_m(mask, maskedoff, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return viota_m_u16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u16mf2_m(mask, maskedoff, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return viota_m_u16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u16m1_m(mask, maskedoff, op1, vl); +vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return viota_m_u16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u16m2_m(mask, maskedoff, op1, vl); +vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return viota_m_u16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u16m4_m(mask, maskedoff, op1, vl); +vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return viota_m_u16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u16m8_m(mask, maskedoff, op1, vl); +vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vbool2_t op1, size_t vl) { + return viota_m_u16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u32mf2_m(mask, maskedoff, op1, vl); +vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return viota_m_u32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u32m1_m(mask, maskedoff, op1, vl); +vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return viota_m_u32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u32m2_m(mask, maskedoff, op1, vl); +vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return viota_m_u32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u32m4_m(mask, maskedoff, op1, vl); +vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return viota_m_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u32m8_m(mask, maskedoff, op1, vl); +vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return viota_m_u32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u64m1_m(mask, maskedoff, op1, vl); +vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return viota_m_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u64m2_m(mask, maskedoff, op1, vl); +vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return 
viota_m_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u64m4_m(mask, maskedoff, op1, vl); +vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return viota_m_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u64m8_m(mask, maskedoff, op1, vl); +vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return viota_m_u64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16.c @@ -171,163 +171,163 @@ // CHECK-RV64-LABEL: @test_vle16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf4_m(mask, maskedoff, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t vl) { + return vle16_v_f16mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf2_m(mask, maskedoff, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t vl) { + return vle16_v_f16mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t 
test_vle16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m1_m(mask, maskedoff, base, vl); +vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t vl) { + return vle16_v_f16m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m2_m(mask, maskedoff, base, vl); +vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t vl) { + return vle16_v_f16m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m4_m(mask, maskedoff, base, vl); +vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t vl) { + return vle16_v_f16m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m8_m(mask, maskedoff, base, vl); +vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t vl) { + return vle16_v_f16m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16mf4_m(mask, maskedoff, base, vl); +vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t vl) { + return vle16_v_i16mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return 
vle16_v_i16mf2_m(mask, maskedoff, base, vl); +vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t vl) { + return vle16_v_i16mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m1_m(mask, maskedoff, base, vl); +vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t vl) { + return vle16_v_i16m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m2_m(mask, maskedoff, base, vl); +vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t vl) { + return vle16_v_i16m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m4_m(mask, maskedoff, base, vl); +vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t vl) { + return vle16_v_i16m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m8_m(mask, maskedoff, base, vl); +vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t vl) { + return vle16_v_i16m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16mf4_m(mask, maskedoff, base, vl); +vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t vl) { + return 
vle16_v_u16mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16mf2_m(mask, maskedoff, base, vl); +vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t vl) { + return vle16_v_u16mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m1_m(mask, maskedoff, base, vl); +vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t vl) { + return vle16_v_u16m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m2_m(mask, maskedoff, base, vl); +vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t vl) { + return vle16_v_u16m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m4_m(mask, maskedoff, base, vl); +vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t vl) { + return vle16_v_u16m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m8_m(mask, maskedoff, base, vl); +vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t vl) { + return vle16_v_u16m8_m(mask, base, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16ff.c @@ -225,217 +225,217 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf4_m(mask, maskedoff, base, new_vl, vl); +vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf2_m(mask, maskedoff, base, new_vl, vl); +vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m1_m(mask, maskedoff, base, new_vl, vl); +vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m2_m(mask, maskedoff, base, new_vl, vl); +vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m4_m(mask, maskedoff, base, new_vl, vl); +vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m8_m(mask, maskedoff, base, new_vl, vl); +vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf4_m(mask, maskedoff, base, new_vl, vl); 
+vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_i16mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf2_m(mask, maskedoff, base, new_vl, vl); +vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_i16mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m1_m(mask, maskedoff, base, new_vl, vl); +vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_i16m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m2_m(mask, maskedoff, base, new_vl, vl); +vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_i16m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m4_m(mask, maskedoff, base, new_vl, vl); +vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_i16m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m8_m(mask, maskedoff, base, new_vl, vl); +vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_i16m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf4_m(mask, maskedoff, base, new_vl, vl); +vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_u16mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf2_m(mask, maskedoff, base, new_vl, vl); +vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_u16mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } 
@llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m1_m(mask, maskedoff, base, new_vl, vl); +vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_u16m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m2_m(mask, maskedoff, base, new_vl, vl); +vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_u16m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m4_m(mask, maskedoff, base, new_vl, vl); +vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_u16m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - 
return vle16ff_v_u16m8_m(mask, maskedoff, base, new_vl, vl); +vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff_v_u16m8_m(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32mf2_m(mask, maskedoff, base, vl); +vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, const float *base, size_t vl) { + return vle32_v_f32mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m1_m(mask, maskedoff, base, vl); +vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, const float *base, size_t vl) { + return vle32_v_f32m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m2_m(mask, maskedoff, base, vl); +vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, const float *base, size_t vl) { + return vle32_v_f32m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m4_m(mask, maskedoff, base, vl); +vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, const float *base, size_t vl) { + return vle32_v_f32m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m8_m(mask, maskedoff, base, vl); +vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, const float *base, size_t vl) { + return vle32_v_f32m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32mf2_m(mask, maskedoff, base, vl); +vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t vl) { + return vle32_v_i32mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m1_m(mask, maskedoff, base, vl); +vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) { + return vle32_v_i32m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m2_m(mask, maskedoff, base, vl); +vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t vl) { + return vle32_v_i32m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m4_m(mask, maskedoff, base, vl); +vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t vl) { + return vle32_v_i32m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m8_m(mask, maskedoff, base, vl); +vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t vl) { + return vle32_v_i32m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32mf2_m(mask, maskedoff, base, vl); +vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t vl) { + return vle32_v_u32mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m1_m(mask, maskedoff, base, vl); +vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t vl) { + return vle32_v_u32m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m2_m(mask, maskedoff, base, vl); +vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t vl) { + return vle32_v_u32m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m4_m(mask, maskedoff, base, vl); +vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t vl) { + return vle32_v_u32m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t 
*base, size_t vl) { - return vle32_v_u32m8_m(mask, maskedoff, base, vl); +vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t vl) { + return vle32_v_u32m8_m(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32ff.c @@ -189,181 +189,181 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32mf2_m(mask, maskedoff, base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m1_m(mask, maskedoff, base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m2_m(mask, maskedoff, base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vle32ff_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m4_m(mask, maskedoff, base, new_vl, vl); +vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m8_m(mask, maskedoff, base, new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32mf2_m(mask, maskedoff, base, new_vl, vl); +vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t 
test_vle32ff_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m1_m(mask, maskedoff, base, new_vl, vl); +vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m2_m(mask, maskedoff, base, new_vl, vl); +vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m4_m(mask, maskedoff, base, new_vl, vl); +vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m8_m(mask, maskedoff, base, new_vl, vl); +vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } 
@llvm.riscv.vleff.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32mf2_m(mask, maskedoff, base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m1_m(mask, maskedoff, base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m2_m(mask, maskedoff, base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m4_m(mask, maskedoff, base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t *new_vl, 
size_t vl) { + return vle32ff_v_u32m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m8_m(mask, maskedoff, base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m8_m(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64.c @@ -117,109 +117,109 @@ // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m1_m(mask, maskedoff, base, vl); +vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, const double *base, size_t vl) { + return vle64_v_f64m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m2_m(mask, maskedoff, base, vl); +vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, const double *base, size_t vl) { + return vle64_v_f64m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m4_m(mask, maskedoff, base, vl); +vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, const double *base, size_t vl) { + return vle64_v_f64m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m8_m(mask, maskedoff, base, vl); +vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, const double *base, size_t vl) { + return vle64_v_f64m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m1_m(mask, maskedoff, base, vl); +vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t vl) { + return vle64_v_i64m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m2_m(mask, maskedoff, base, vl); +vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t vl) { + return vle64_v_i64m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m4_m(mask, maskedoff, base, vl); +vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t vl) { + return vle64_v_i64m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m8_m(mask, maskedoff, base, vl); +vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t vl) { + return vle64_v_i64m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m1_m(mask, maskedoff, base, vl); +vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t vl) { + return vle64_v_u64m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m2_m(mask, maskedoff, base, vl); +vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t vl) { + return vle64_v_u64m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m4_m(mask, maskedoff, base, vl); +vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t vl) { + return vle64_v_u64m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m8_m(mask, maskedoff, base, vl); +vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t vl) { + return vle64_v_u64m8_m(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64ff.c @@ -153,145 +153,145 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m1_m(mask, maskedoff, base, new_vl, vl); +vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vle64ff_v_f64m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m2_m(mask, maskedoff, base, new_vl, vl); +vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { + return vle64ff_v_f64m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m4_m(mask, maskedoff, base, new_vl, vl); +vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t mask, const double *base, size_t *new_vl, size_t vl) { + return vle64ff_v_f64m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m8_m(mask, maskedoff, base, new_vl, vl); +vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, const double *base, size_t *new_vl, size_t vl) { + return vle64ff_v_f64m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m1_m(mask, maskedoff, base, new_vl, vl); +vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vle64ff_v_i64m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m2_m(mask, maskedoff, base, new_vl, vl); +vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vle64ff_v_i64m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m4_m(mask, maskedoff, base, new_vl, vl); +vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vle64ff_v_i64m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m8_m(mask, maskedoff, base, new_vl, vl); +vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t *new_vl, 
size_t vl) { + return vle64ff_v_i64m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m1_m(mask, maskedoff, base, new_vl, vl); +vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vle64ff_v_u64m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m2_m(mask, maskedoff, base, new_vl, vl); +vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vle64ff_v_u64m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m4_m(mask, maskedoff, base, new_vl, vl); +vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vle64ff_v_u64m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], 
ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m8_m(mask, maskedoff, base, new_vl, vl); +vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vle64ff_v_u64m8_m(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8.c @@ -135,127 +135,127 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf8_m(mask, maskedoff, base, vl); +vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t vl) { + return vle8_v_i8mf8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf4_m(mask, maskedoff, base, vl); +vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t vl) { + return vle8_v_i8mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf2_m(mask, maskedoff, base, vl); +vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t vl) { + return vle8_v_i8mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m1_m(mask, maskedoff, base, vl); +vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t vl) { + return vle8_v_i8m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m2_m(mask, maskedoff, base, vl); +vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t vl) { + return vle8_v_i8m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m4_m(mask, maskedoff, base, vl); +vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t vl) { + return vle8_v_i8m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m8_m(mask, maskedoff, base, vl); +vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t vl) { + return vle8_v_i8m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf8_m(mask, maskedoff, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t vl) { + return vle8_v_u8mf8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf4_m(mask, maskedoff, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t vl) { + return vle8_v_u8mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( 
poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf2_m(mask, maskedoff, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t vl) { + return vle8_v_u8mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m1_m(mask, maskedoff, base, vl); +vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t vl) { + return vle8_v_u8m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m2_m(mask, maskedoff, base, vl); +vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t vl) { + return vle8_v_u8m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m4_m(mask, maskedoff, base, vl); +vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t vl) { + return vle8_v_u8m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m8_m(mask, maskedoff, base, vl); +vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t vl) { + return vle8_v_u8m8_m(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8ff.c @@ -177,169 +177,169 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_i8mf8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf4_m(mask, maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_i8mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf2_m(mask, maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_i8mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return 
vle8ff_v_i8m1_m(mask, maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_i8m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m2_m(mask, maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_i8m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m4_m(mask, maskedoff, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_i8m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m8_m(mask, maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_i8m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf8_m(mask, maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_u8mf8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf4_m(mask, maskedoff, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_u8mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf2_m(mask, maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_u8mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m1_m(mask, maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_u8m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m2_m(mask, maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_u8m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m4_m(mask, maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_u8m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m8_m(mask, maskedoff, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_u8m8_m(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei16.c @@ -522,514 +522,514 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f16mf4_m(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f16m1_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f16m2_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f16m4_m(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint16m4_t 
bindex, size_t vl) { + return vloxei16_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_f16m8_m(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vloxei16_v_f16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4_t bindex, size_t vl) { + return vloxei16_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, 
vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16_v_i8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) { + return 
vloxei16_v_i16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return 
vloxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16_v_u8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16_v_u16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, 
vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei32.c @@ -477,469 +477,469 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f16mf4_m(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f16m1_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f16m2_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f16m4_m(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, const float 
*base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, 
size_t vl) { - return vloxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + 
return vloxei32_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei64.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f16mf4_m(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f16m1_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f16m2_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { 
- return vloxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return 
vloxei64_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i64m4_m(mask, maskedoff, 
base, bindex, vl); +vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( 
[[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t 
test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return 
vloxei64_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei8.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei8.c @@ -540,532 +540,532 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f16mf4_m(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f16m1_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f16m2_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f16m4_m(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_f16m8_m(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vloxei8_v_f16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_f64m2_m(mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8_v_i8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) { + return vloxei8_v_i8m8_m(mask, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8_v_i16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, const int32_t 
*base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl); 
+vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8_v_u8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) { + return vloxei8_v_u8m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t 
bindex, size_t vl) { - return vloxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8_v_u16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c @@ -633,625 +633,625 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64( poison, poison, 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxseg2ei16_v_i8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, 
size_t vl) { + return vloxseg2ei16_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, 
vl); +void test_vloxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const 
int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxseg2ei16_v_u8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return 
vloxseg2ei16_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vloxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c @@ -607,599 +607,599 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_f64m2_m(v0, v1, 
mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, 
vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m2_m(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t 
maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, 
vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, 
vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg2ei32_v_u64m4_m(v0, v1, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c
@@ -542,534 +542,534 @@
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64_v_f16mf4_m(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg2ei64_v_f16mf2_m(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { + return 
vloxseg2ei64_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vloxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, 
size_t vl) { - return vloxseg2ei64_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t 
*v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], 
ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t 
mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return 
vloxseg2ei64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u64m2_m(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg2ei64_v_u64m4_m(v0, v1, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c
@@ -633,625 +633,625 @@
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8_v_f16mf4_m(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , }
@llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, 
vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, 
vl); +void test_vloxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return 
vloxseg2ei8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxseg2ei8_v_i8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, 
const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 
// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxseg2ei8_v_u8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const 
uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m4_m(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, 
vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); 
+void test_vloxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg3ei16_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, 
vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( poison, poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, 
vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg3ei16_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, 
const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( poison, poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, 
vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, 
vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t 
mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg3ei32_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return 
vloxseg3ei32_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_i64m1_m(v0, v1, v2, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg3ei32_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_u32m2_m(v0, v1, v2, mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c @@ -534,7 +534,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -543,13 +543,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -558,13 +558,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); +void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void 
test_vloxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, 
vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( poison, 
poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t 
*v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, 
vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return 
vloxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg3ei8_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, 
vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( poison, 
poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, 
vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg3ei8_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, 
vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return 
vloxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const 
double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], 
align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, 
vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, 
vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
} [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m2_m(v0, v1, v2, 
v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, 
vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, 
vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( poison, poison, 
poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, 
vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ 
-1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, 
vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c @@ -604,7 +604,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -615,13 +615,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -632,13 +632,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, 
const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, 
vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_i64m2_m(v0, v1, v2, 
v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 
// CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t 
vl) { + return vloxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,7 +1193,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, 
vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + 
return vloxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, 
vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void 
test_vloxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( 
poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return 
vloxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, 
const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return 
vloxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t 
maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const 
int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, 
vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t 
mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16m1_m(vuint16m1_t *v0, 
vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { 
- return vloxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, 
vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, 
vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16mf2_m(vint16mf2_t 
*v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f64m1_m(vfloat64m1_t 
*v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void 
test_vloxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, 
vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void 
test_vloxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: 
ret void // -void test_vloxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u64m1_m(v0, v1, v2, 
v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, 
vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, 
vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t 
maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, 
vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return 
vloxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( poison, 
poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store 
[[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, 
vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, 
vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return 
vloxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, 
size_t vl) { + return vloxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, 
vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, 
const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void 
test_vloxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t 
*v5, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t 
maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t 
*base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, 
vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ 
// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( poison, poison, 
poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return 
vloxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, 
vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return 
vloxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, 
vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void 
test_vloxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // 
CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, 
poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return 
vloxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, 
vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u64m1_m(v0, 
v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_u16m1_m(v0, 
v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, 
vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, 
vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, 
v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void 
test_vloxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, 
const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, 
vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, 
vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -1278,13 +1278,13 @@
// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -1303,7 +1303,7 @@
// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c
@@ -659,7 +659,7 @@
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -678,13 +678,13 @@
// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -703,13 +703,13 @@
// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t
vl) { - return vloxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, 
vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return 
vloxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t 
*v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( poison, 
poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const 
uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, 
vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint64m4_t 
bindex, size_t vl) { + return vloxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return 
vloxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, 
vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, 
v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void 
test_vloxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t 
maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_u32m1_m(v0, v1, v2, 
v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t 
*v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t 
*v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_i8m1_m(v0, 
v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t 
mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 
+1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void 
test_vloxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t 
maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: 
store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse16.c @@ -171,163 +171,163 @@ // CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf4_m(mask, maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf2_m(mask, maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m1_m(mask, maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m2_m(mask, maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, 
size_t vl) { - return vlse16_v_f16m4_m(mask, maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m8_m(mask, maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf4_m(mask, maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_i16mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf2_m(mask, maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_i16mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m1_m(mask, maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_i16m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( 
[[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m2_m(mask, maskedoff, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_i16m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m4_m(mask, maskedoff, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_i16m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m8_m(mask, maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_i16m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf4_m(mask, maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_u16mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t 
bstride, size_t vl) { - return vlse16_v_u16mf2_m(mask, maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_u16mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m1_m(mask, maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_u16m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m2_m(mask, maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_u16m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m4_m(mask, maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_u16m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m8_m(mask, maskedoff, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_u16m8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse32.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse32.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32mf2_m(mask, maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_f32mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m1_m(mask, maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_f32m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m2_m(mask, maskedoff, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_f32m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m4_m(mask, maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_f32m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m8_m(mask, maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_f32m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32mf2_m(mask, maskedoff, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_i32mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m1_m(mask, maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_i32m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m2_m(mask, maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_i32m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t 
maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m4_m(mask, maskedoff, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_i32m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m8_m(mask, maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_i32m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32mf2_m(mask, maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_u32mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m1_m(mask, maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_u32m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m2_m(mask, maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_u32m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m4_m(mask, maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_u32m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m8_m(mask, maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32_v_u32m8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse64.c @@ -117,109 +117,109 @@ // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m1_m(mask, maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_f64m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m2_m(mask, maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_f64m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m4_m(mask, maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_f64m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m8_m(mask, maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_f64m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m1_m(mask, maskedoff, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_i64m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m2_m(mask, maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_i64m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const 
int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m4_m(mask, maskedoff, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_i64m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m8_m(mask, maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_i64m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m1_m(mask, maskedoff, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_u64m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m2_m(mask, maskedoff, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_u64m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m4_m(mask, maskedoff, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_u64m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m8_m(mask, maskedoff, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64_v_u64m8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse8.c @@ -134,127 +134,127 @@ // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf8_m(mask, maskedoff, base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_i8mf8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf4_m(mask, maskedoff, base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_i8mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf2_m(mask, maskedoff, base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_i8mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( 
[[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m1_m(mask, maskedoff, base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_i8m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m2_m(mask, maskedoff, base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_i8m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m4_m(mask, maskedoff, base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_i8m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m8_m(mask, maskedoff, base, bstride, vl); +vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_i8m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf8_m(mask, maskedoff, base, bstride, vl); 
+vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_u8mf8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf4_m(mask, maskedoff, base, bstride, vl); +vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_u8mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf2_m(mask, maskedoff, base, bstride, vl); +vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_u8mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m1_m(mask, maskedoff, base, bstride, vl); +vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_u8m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m2_m(mask, maskedoff, base, bstride, vl); +vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_u8m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vlse.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m4_m(mask, maskedoff, base, bstride, vl); +vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_u8m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m8_m(mask, maskedoff, base, bstride, vl); +vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8_v_u8m8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16.c @@ -204,196 +204,196 @@ // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16mf4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, 
vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void 
test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16mf4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16mf4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m4_m(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -243,13 +243,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -273,13 +273,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -303,13 +303,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, size_t *new_vl, 
size_t vl) { + return vlseg2e16ff_v_f16m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -318,13 +318,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -333,13 +333,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -348,13 +348,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void 
test_vlseg2e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -363,13 +363,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -378,13 +378,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -393,13 +393,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t 
vl) { - return vlseg2e16ff_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -408,13 +408,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -423,13 +423,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -438,13 +438,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16m2_m(vuint16m2_t 
*v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16m4_m(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32.c @@ -165,157 +165,157 @@ // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, size_t vl) { + return vlseg2e32_v_f32mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( poison, poison, ptr [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, size_t vl) { + return vlseg2e32_v_f32m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, size_t vl) { + return vlseg2e32_v_f32m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, size_t vl) { + return vlseg2e32_v_f32m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg2e32_v_i32mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg2e32_v_i32m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, size_t vl) { + return vlseg2e32_v_i32m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { 
- return vlseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, size_t vl) { + return vlseg2e32_v_i32m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m2_m(v0, 
v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, size_t vl) { + return vlseg2e32_v_u32m4_m(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -198,13 +198,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg2e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -243,13 +243,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -258,13 
+258,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -273,13 +273,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -303,13 +303,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -318,13 +318,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -333,13 +333,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -348,13 +348,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m4_m(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64.c @@ -126,118 +126,118 @@ // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, size_t vl) { + return vlseg2e64_v_f64m1_m(v0, v1, mask, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, size_t vl) { + return vlseg2e64_v_f64m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, size_t vl) { + return vlseg2e64_v_f64m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg2e64_v_i64m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, size_t vl) { + return vlseg2e64_v_i64m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, size_t vl) { + return vlseg2e64_v_i64m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, size_t vl) { + return vlseg2e64_v_u64m4_m(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_f64m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_f64m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -183,13 +183,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_f64m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -198,13 +198,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -243,13 +243,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return 
vlseg2e64ff_v_u64m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m4_m(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8.c @@ -164,157 +164,157 @@ // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr 
[[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf8_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void 
test_vlseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, size_t vl) { + return vlseg2e8_v_i8m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8mf8_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8mf4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8_v_u8m4_m(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -198,13 +198,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } 
@llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -243,13 +243,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -273,13 +273,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf8_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -303,13 +303,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -318,13 +318,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -333,13 +333,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return 
vlseg2e8ff_v_u8m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -348,13 +348,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m4_m(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16.c @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -198,13 +198,13 @@ // CHECK-RV64-NEXT: 
store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -243,13 +243,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -273,13 +273,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -303,13 +303,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, size_t vl) { + return vlseg3e16_v_i16m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -318,13 +318,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -333,13 +333,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -348,13 +348,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, 
vbool8_t mask, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16m2_m(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -224,13 +224,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -241,13 +241,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -275,13 +275,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -292,13 +292,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void 
test_vlseg3e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -309,13 +309,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -326,13 +326,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ 
-343,13 +343,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -360,13 +360,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -377,13 +377,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -394,13 +394,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -411,7 +411,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16m2_m(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32.c @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -153,13 +153,13 @@ 
// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, size_t vl) { + return vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, size_t vl) { + return vlseg3e32_v_f32m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -183,13 +183,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, size_t vl) { + return vlseg3e32_v_f32m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -198,13 +198,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg3e32_v_i32m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, size_t vl) { + return vlseg3e32_v_i32m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -243,13 +243,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, size_t vl) { + return vlseg3e32_v_u32m2_m(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -207,13 +207,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -224,13 +224,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -241,13 +241,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -275,13 +275,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_u32mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -292,13 +292,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t 
maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_u32m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -309,7 +309,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_u32m2_m(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64.c @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, size_t vl) { + return vlseg3e64_v_f64m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, size_t vl) { + return vlseg3e64_v_f64m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg3e64_v_i64m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, size_t vl) { + return vlseg3e64_v_i64m2_m(v0, v1, v2, mask, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg3e64_v_u64m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, size_t vl) { + return vlseg3e64_v_u64m2_m(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c @@ -111,7 +111,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
i64 } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_f64m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_f64m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -156,13 +156,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_i64m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_i64m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_u64m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_u64m2_m(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8.c @@ -158,7 +158,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -167,13 +167,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -182,13 +182,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -197,13 +197,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -227,13 +227,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -242,13 +242,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -257,13 +257,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -272,13 +272,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -287,13 +287,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -302,7 +302,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8_v_u8m2_m(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf8_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -207,13 +207,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -224,13 +224,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -241,13 +241,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -275,13 +275,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf8_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -292,13 +292,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -309,13 +309,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -326,13 +326,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t 
*base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8m2_m(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16.c @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -224,13 +224,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -241,13 +241,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -275,13 +275,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, 
vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -292,13 +292,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -309,13 +309,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -326,13 +326,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -343,13 +343,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -360,13 +360,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16mf4_m(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -377,13 +377,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -394,13 +394,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , } [[TMP0]], 1 @@ -411,7 +411,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c @@ -237,7 +237,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -250,13 +250,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -269,13 +269,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, 
vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -307,13 +307,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -326,13 +326,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -345,13 +345,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -364,13 +364,13 @@ // CHECK-RV64-NEXT: 
store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -383,13 +383,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -402,13 +402,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return 
vlseg4e16ff_v_u16mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -421,13 +421,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -440,13 +440,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -459,7 +459,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32.c @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, size_t vl) { + return vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, 
vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, size_t vl) { + return vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -207,13 +207,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, size_t vl) { + return vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -224,13 +224,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -241,13 +241,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, size_t vl) { + return vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -275,13 +275,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_u32mf2_m(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -292,13 +292,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -309,7 +309,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, size_t vl) { + return vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -193,13 +193,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -231,13 +231,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg4e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -250,13 +250,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -269,13 +269,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg4e32ff_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -307,13 +307,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 1 @@ -326,13 +326,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -345,7 +345,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64.c @@ -111,7 +111,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t 
mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, size_t vl) { + return vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, size_t vl) { + return vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -156,13 +156,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, size_t vl) { + return vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t 
*v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, size_t vl) { + return vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -136,13 +136,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_f64m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_f64m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -174,13 +174,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_i64m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -193,13 +193,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_i64m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -212,13 +212,13 @@ // 
CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -231,7 +231,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8.c @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -189,13 +189,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, 
vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -206,13 +206,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -223,13 +223,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -240,13 +240,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -257,13 +257,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -274,13 +274,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -291,13 +291,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -308,13 +308,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -325,13 +325,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, 
vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -342,7 +342,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf8_m(v0, v1, v2, v3, mask, base, 
new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -231,13 +231,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -250,13 +250,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , 
i64 } [[TMP0]], 1 @@ -269,13 +269,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -307,13 +307,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf8_m(v0, v1, v2, v3, 
mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -326,13 +326,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -345,13 +345,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -364,13 +364,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -383,7 +383,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16.c @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -193,13 +193,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -231,13 +231,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, 
base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -250,13 +250,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -269,13 +269,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -307,13 +307,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -326,13 +326,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -345,7 +345,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void 
test_vlseg5e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -234,13 +234,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -255,13 +255,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -276,13 +276,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -297,13 +297,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -318,13 +318,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -339,13 +339,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -360,13 +360,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, 
vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32.c @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -136,13 +136,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, 
vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, size_t vl) { + return vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, size_t vl) { + return vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -174,13 +174,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -193,13 +193,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -231,7 +231,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -150,13 +150,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -171,13 +171,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -192,13 +192,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t 
maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -234,13 +234,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -255,7 +255,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return 
vlseg5e32ff_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64.c @@ -66,7 +66,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { - return vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, size_t vl) { + return vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { - return vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8.c @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -192,13 +192,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -211,13 +211,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, 
vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -230,13 +230,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -249,13 +249,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -268,13 +268,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -287,13 +287,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -306,7 +306,7 @@ // CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -192,13 +192,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8mf4_m(vint8mf4_t *v0, 
vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -234,13 +234,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -255,13 +255,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, 
vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -276,13 +276,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -297,13 +297,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -318,13 +318,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -339,7 +339,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16.c @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -234,13 +234,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -255,13 +255,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -276,13 +276,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -297,13 +297,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, 
vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -318,13 +318,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -339,13 +339,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_u16mf4_m(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -360,13 +360,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -233,13 +233,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -256,13 +256,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, 
vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -279,13 +279,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -302,13 +302,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, 
v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -325,13 +325,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -348,13 +348,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -371,13 +371,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -394,13 +394,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32.c @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -150,13 +150,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, size_t vl) { + return vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -171,13 +171,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, size_t vl) { + return vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -192,13 +192,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -234,13 +234,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -255,7 +255,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t 
*v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c @@ -147,7 +147,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -164,13 +164,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, , , i64 } [[TMP0]], 1 @@ -187,13 +187,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -210,13 +210,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -233,13 +233,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg6e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -256,13 +256,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -279,7 +279,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, 
vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64.c @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { - return vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, size_t vl) { + return vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, 
vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { - return vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg6e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -141,7 +141,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t 
maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8.c @@ -176,7 +176,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -191,13 +191,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t 
maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -233,13 +233,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -254,13 +254,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, 
size_t vl) { + return vlseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -275,13 +275,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -296,13 +296,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -317,13 +317,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -338,7 +338,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c @@ -193,7 +193,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -210,13 +210,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -233,13 +233,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -256,13 +256,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -279,13 +279,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -302,13 +302,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr 
[[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -325,13 +325,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -348,13 +348,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, 
vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16.c @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -233,13 +233,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -256,13 +256,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -279,13 +279,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -302,13 +302,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -325,13 +325,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, 
vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -348,13 +348,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -371,13 +371,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, 
vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -394,13 +394,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1_m(v0, v1, 
v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -253,13 +253,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -278,13 +278,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], 
align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -303,13 +303,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , 
i64 } [[TMP0]], 1 @@ -328,13 +328,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -353,13 +353,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -378,13 +378,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -403,13 +403,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -428,13 +428,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32.c @@ -147,7 +147,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -164,13 +164,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, size_t vl) { + return vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -187,13 +187,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, size_t vl) { + return vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -210,13 +210,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -233,13 +233,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, 
poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -256,13 +256,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -279,7 +279,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -178,13 +178,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -203,13 +203,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -253,13 +253,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -278,13 +278,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -303,7 +303,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64.c @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { - return vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, size_t vl) { + return vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { - return vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void 
test_vlseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -141,7 +141,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t 
maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t 
maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8.c @@ -192,7 +192,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -209,13 +209,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -232,13 +232,13 @@ // CHECK-RV64-NEXT: 
store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -255,13 +255,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -278,13 +278,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, 
vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -301,13 +301,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -324,13 +324,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { - 
return vlseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -347,13 +347,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_u8m1_m(vuint8m1_t 
*v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -253,13 +253,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t 
maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -278,13 +278,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -303,13 +303,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t 
maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -328,13 +328,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -353,13 +353,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t 
maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -378,13 +378,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -403,7 +403,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t 
maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16.c @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -253,13 +253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -278,13 +278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -303,13 +303,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -328,13 +328,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -353,13 +353,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -378,13 +378,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -403,13 +403,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -428,13 +428,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -273,13 +273,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -300,13 +300,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t 
maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -327,13 +327,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -354,13 +354,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_i16mf4_m(vint16mf4_t *v0, 
vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -381,13 +381,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -408,13 +408,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -435,13 +435,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } 
@llvm.riscv.vlseg8ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -462,13 +462,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -489,7 +489,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32.c @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -178,13 +178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, size_t vl) { + return vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -203,13 +203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { - return 
vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, size_t vl) { + return vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -253,13 +253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, 
vint32m1_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -278,13 +278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -303,7 +303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, 
vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -192,13 +192,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, 
poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -219,13 +219,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -246,13 +246,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -273,13 +273,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -300,13 +300,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); 
} // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -327,7 +327,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64.c @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t 
maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { - return vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, size_t vl) { + return vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { - return vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, 
vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -111,13 +111,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, 
poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -165,7 +165,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8.c @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -227,13 +227,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -252,13 +252,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t 
mask, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -277,13 +277,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -302,13 +302,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, 
size_t vl) { + return vlseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -327,13 +327,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -352,13 +352,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, 
vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -377,13 +377,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -402,7 +402,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, 
vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -246,13 +246,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -273,13 +273,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t 
*v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -300,13 +300,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -327,13 +327,13 @@ // CHECK-RV64-NEXT: store i64 
[[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -354,13 +354,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -381,13 +381,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -408,13 +408,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } 
@llvm.riscv.vlseg8ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -435,7 +435,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c @@ -204,196 +204,196 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16mf4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( poison, poison, 
ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_i16mf4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_i16mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_i16m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_i16m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_i16m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_u16mf4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_u16mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_u16m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
} @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_u16m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_u16m4_m(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c @@ -165,157 +165,157 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, 
vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_f32mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_f32m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_f32m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret 
void // -void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_f32m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_i32mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_i32m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_i32m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_i32m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_u32mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_u32m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_u32m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_u32m4_m(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c @@ -126,118 +126,118 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_m( // CHECK-RV64-NEXT: 
entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e64_v_f64m1_m(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], ptr [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], ptr [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e64_v_f64m2_m(v0, v1, mask, base, bstride, vl);
}
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], ptr [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], ptr [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e64_v_f64m4_m(v0, v1, mask,
base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_i64m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_i64m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, ptrdiff_t bstride, 
size_t vl) { + return vlsseg2e64_v_i64m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_u64m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_u64m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void 
test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_u64m4_m(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c @@ -164,157 +164,157 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_i8mf8_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_i8mf4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_i8mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_i8m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_i8m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_i8m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_u8mf8_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_u8mf4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_u8mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_u8m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_u8m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_u8m4_m(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -198,13 +198,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_f16mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, 
size_t vl) { - return vlsseg3e16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_f16mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_f16m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -243,13 +243,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_f16m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_i16mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -273,13 +273,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_i16mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_i16m1_m(v0, v1, v2, mask, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -303,13 +303,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_i16m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -318,13 +318,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_u16mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -333,13 +333,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t 
*v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_u16mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -348,13 +348,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_u16m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_u16m2_m(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_f32mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_f32m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -183,13 +183,13 @@ // CHECK-RV64-NEXT: 
store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_f32m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -198,13 +198,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_i32mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_i32m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_i32m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -243,13 +243,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_u32mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_u32m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_u32m2_m(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_f64m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_f64m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_i64m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_i64m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_u64m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_u64m2_m(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c @@ -158,7 +158,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -167,13 +167,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_i8mf8_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -182,13 +182,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_i8mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -197,13 +197,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg3e8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_i8mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_i8m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -227,13 +227,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_i8m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -242,13 +242,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_u8mf8_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -257,13 +257,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_u8mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -272,13 +272,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_u8mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -287,13 +287,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_u8m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -302,7 +302,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_u8m2_m(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -224,13 +224,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -241,13 +241,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -275,13 +275,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -292,13 +292,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -309,13 +309,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -326,13 +326,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -343,13 +343,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, 
vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -360,13 +360,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -377,13 +377,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -394,13 +394,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -411,7 +411,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -207,13 +207,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, 
vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -224,13 +224,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -241,13 +241,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -258,13 +258,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -275,13 +275,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 @@ -292,13 +292,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -309,7 +309,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c @@ -111,7 +111,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, 
vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -156,13 +156,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ 
-207,7 +207,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -189,13 +189,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -206,13 +206,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t 
maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -223,13 +223,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -240,13 +240,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -257,13 +257,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -274,13 +274,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -291,13 +291,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -308,13 +308,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -325,13 +325,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -342,7 +342,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -193,13 +193,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -231,13 +231,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -250,13 +250,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -269,13 +269,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -288,13 +288,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16m1_m(vint16m1_t 
*v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -307,13 +307,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -326,13 +326,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
bstride, vl); +void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -345,7 +345,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -136,13 +136,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -174,13 +174,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -193,13 +193,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, 
poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -231,7 +231,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c @@ -66,7 +66,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -192,13 +192,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -211,13 +211,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -230,13 +230,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -249,13 +249,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void 
test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -268,13 +268,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -287,13 +287,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -306,7 +306,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg6e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -234,13 +234,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -255,13 +255,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -276,13 +276,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -297,13 +297,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -318,13 +318,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -339,13 +339,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( 
poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -360,13 +360,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -150,13 +150,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -171,13 +171,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -192,13 +192,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, 
poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -234,13 +234,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -255,7 +255,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c @@ -176,7 +176,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -191,13 +191,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -233,13 +233,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -254,13 +254,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -275,13 +275,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -296,13 +296,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -317,13 +317,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -338,7 +338,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr 
[[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -233,13 +233,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlsseg7.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -256,13 +256,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -279,13 +279,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -302,13 +302,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -325,13 +325,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -348,13 +348,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -371,13 +371,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -394,13 +394,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c @@ -147,7 +147,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -164,13 +164,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -187,13 +187,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -210,13 +210,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -233,13 +233,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -256,13 +256,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -279,7 +279,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t 
maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -141,7 +141,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c @@ -192,7 +192,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], 
i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -209,13 +209,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -232,13 +232,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -255,13 +255,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -278,13 +278,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -301,13 +301,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -324,13 +324,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -347,13 +347,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -253,13 +253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -278,13 +278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -303,13 +303,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -328,13 +328,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t 
*v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -353,13 +353,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -378,13 +378,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -403,13 +403,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -428,13 +428,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -178,13 +178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -203,13 +203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, 
vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -228,13 +228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -253,13 +253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: 
ret void // -void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -278,13 +278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -303,7 +303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, 
v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void 
test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -227,13 +227,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -252,13 +252,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], 
align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -277,13 +277,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -302,13 +302,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -327,13 +327,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( poison, 
poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -352,13 +352,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -377,13 +377,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -402,7 +402,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei16.c @@ -522,514 +522,514 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f16mf4_m(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t 
test_vluxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f16m1_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f16m2_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f16m4_m(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxei16_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_f16m8_m(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vluxei16_v_f16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4_t bindex, size_t vl) { + return vluxei16_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m2_t bindex, 
size_t vl) { + return vluxei16_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16_v_i8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i16m1_m(mask, maskedoff, base, 
bindex, vl); +vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16_v_i16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_u8mf8_m(mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16_v_u8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return 
vluxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16_v_u16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei32.c @@ -477,469 +477,469 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f16mf4_m(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f16m1_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f16m2_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f16m4_m(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxei32_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t 
bindex, size_t vl) { - return vluxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8_t bindex, size_t vl) { + return vluxei32_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return 
vluxei32_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return 
vluxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return 
vluxei32_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, 
size_t vl) {
- return vluxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_u64m2_m(mask, base, bindex, vl);
 }
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_u64m4_m(mask, base, bindex, vl);
 }
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_u64m8_m(mask, base, bindex, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei64.c
@@ -405,397 +405,397 @@
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64_v_f16mf4_m(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_f16mf4_m(mask, base, bindex, vl);
 }
// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f16m1_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f16m2_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { 
+ return vluxei64_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i8mf2_m(mask, 
maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_i64m8_m(mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return 
vluxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei8.c @@ -540,532 +540,532 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f16mf4_m(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_f16mf4_m(mask, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f16m1_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f16m2_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f16m4_m(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxei8_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_f16m8_m(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vluxei8_v_f16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f32m4_m(mask, maskedoff, base, bindex, 
vl); +vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t bindex, size_t vl) { + return vluxei8_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t 
vl) { - return vluxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8_v_i8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) { + return vluxei8_v_i8m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( 
[[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t 
maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8_v_i16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_u8m1_m(mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8_v_u8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) { + return vluxei8_v_u8m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, 
vuint8m4_t bindex, size_t vl) { + return vluxei8_v_u16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return 
vluxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c @@ -633,625 +633,625 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + 
return vluxseg2ei16_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vluxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return 
vluxseg2ei16_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxseg2ei16_v_i8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], 
align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxseg2ei16_v_u8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + 
return vluxseg2ei16_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); +void test_vluxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t 
maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c @@ -607,599 +607,599 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + 
return vluxseg2ei32_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); 
+void test_vluxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, 
const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( poison, poison, 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const 
uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return 
vluxseg2ei32_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m1_m(vuint32m1_t 
*v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c @@ -542,534 +542,534 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, 
vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m1_m(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, 
vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], 
ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_u8mf2_m(v0, v1, 
mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vluxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const 
uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void 
// -void test_vluxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c @@ -633,625 +633,625 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const 
_Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2_m(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, 
vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store 
[[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxseg2ei8_v_i8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxseg2ei8_v_u8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t 
mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m1_m(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, 
vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1_m(v0, v1, 
v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return 
vluxseg3ei16_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], 
align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg3ei16_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg3ei16_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 
2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t 
*v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg3ei16_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg3ei16_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c
@@ -564,7 +564,7 @@
 // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -573,13 +573,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -588,13 +588,13 @@
 // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1,
vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t 
*v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg3ei32_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, 
vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t 
maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t 
vl) { + return vluxseg3ei32_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 
@@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t 
maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c @@ -534,7 +534,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -543,13 +543,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return 
vluxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -558,13 +558,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, 
vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( poison, poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, 
size_t vl) { + return vluxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store 
[[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return 
vluxseg3ei64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return 
vluxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store 
[[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const 
uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg3ei8_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t 
maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_i64m2_m(v0, 
v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg3ei8_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, 
vl); +void test_vluxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t 
vl) { + return vluxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, 
size_t vl) { - return vluxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t 
*v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + 
return vluxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return 
vluxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m1_m(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void 
test_vluxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, 
vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c @@ -604,7 +604,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -615,13 +615,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -632,13 +632,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, 
vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8m1_m(vint8m1_t 
*v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t 
maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, 
const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, 
vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 @@ -1193,7 +1193,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + 
return vluxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, 
size_t vl) { - return vluxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, 
vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, 
const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return 
vluxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_u32mf2_m(v0, 
v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, 
vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void 
test_vluxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, 
vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, 
vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void 
test_vluxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg5ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const 
int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + 
return vluxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, 
vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, 
vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, 
poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t 
mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t 
*v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, 
vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, 
vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], 
align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { 
- return vluxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, 
vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t 
*v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, 
vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t 
*v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t 
*v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void 
test_vluxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg6ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t 
mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t 
maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void 
test_vluxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t 
*v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, 
const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return 
vluxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, 
v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr 
[[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t 
maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // 
CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, 
vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return 
vluxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void 
test_vluxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const 
int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, 
poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( poison, 
poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, 
vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, 
const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t 
*v5, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, 
const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t 
maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t 
maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, 
vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16m1_m(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // 
CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void 
test_vluxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return 
vluxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, 
vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, 
vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t 
mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, 
vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store 
[[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , 
, } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( poison, 
poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, 
vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, 
vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t 
maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t 
maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t 
maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, 
vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], 
align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( 
poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c @@ 
-607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); +void test_vluxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f64m1_m(v0, v1, 
v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - 
return vluxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) 
{ - return vluxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t 
maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, 
vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, 
vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, 
vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, 
vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ 
-1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, 
vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, 
poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void 
test_vluxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, 
vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t 
maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, 
vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return 
vluxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t 
*v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t 
*v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, 
vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, 
v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t 
*v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, 
v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, 
vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, 
vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vluxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t 
*v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( 
poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, 
vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vmacc_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, 
size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: 
@test_vmacc_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( 
[[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: 
@test_vmacc_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t 
vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, 
vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // 
CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 
[[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, 
vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: 
@test_vmadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( 
[[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t vd, 
uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: 
@test_vmadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( 
[[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 
@@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, 
vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( 
[[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 
+1125,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmax.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmax_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmax_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmax_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmax_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmax_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmax_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmax_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmax_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmax_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmax_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t 
mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmax_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmax_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t 
maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmax_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmax_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmax_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( poison, 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmax_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmax_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmax_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmax_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmax_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmax_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmax_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmax_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmax_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t 
test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmax_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmaxu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmaxu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmaxu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmaxu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmaxu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmaxu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t 
test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmaxu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmaxu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmaxu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, 
vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmaxu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmaxu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmaxu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmaxu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmaxu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmaxu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmaxu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmaxu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmaxu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmaxu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - 
return vmaxu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmaxu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmaxu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmaxu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmaxu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmaxu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmaxu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmaxu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmaxu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m2_m(mask, 
maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmaxu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmaxu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfeq_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfeq_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfeq_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfeq_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfeq_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfeq_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfeq_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfeq_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfeq_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfeq_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfeq_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfeq_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfeq_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfeq_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, 
size_t vl) { + return vmfeq_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfeq_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfeq_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfeq_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfeq_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t 
mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfeq_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfeq_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfeq_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfeq_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfeq_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfeq_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfeq_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfeq_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfeq_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfeq_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmfeq_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfeq_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfeq_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfge_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfge_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfge.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfge_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfge_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfge_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfge_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfge_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t 
test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfge_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfge_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfge_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfge_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfge_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfge_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfge_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfge_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfge_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfge_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfge_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfge_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfge_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfge_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfge_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfge_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfge_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfge_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfge_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfge_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfge_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfge_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return 
vmfge_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfge_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfge_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfge_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfge_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfge_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfge_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfge_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfge_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vbool32_t 
maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfge_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfge_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfge_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfge_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfge_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmfge_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfge_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfge_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfge_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vmfge_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfgt_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfgt_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfgt_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfgt_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfgt.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfgt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfgt_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfgt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfgt_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfgt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfgt_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfgt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfgt_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfgt_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfgt_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfgt_vf_f32mf2_b64_m(mask, 
maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfgt_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfgt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfgt_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfgt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfgt_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfgt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfgt_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfgt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfgt_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfgt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfgt_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfgt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfgt_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfgt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfgt_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, 
double op2, size_t vl) { + return vmfgt_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfgt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfgt_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfle_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfle_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfle_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmfle_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfle_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfle_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfle_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfle_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfle_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfle_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, 
size_t vl) { - return vmfle_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfle_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfle_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfle_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfle_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfle_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfle_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfle_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.f16.i64( poison, 
[[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfle_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfle_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfle_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfle_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfle_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfle_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfle_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfle_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfle_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfle_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfle_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfle_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfle_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfle_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfle_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfle_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfle_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t 
test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfle_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfle_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfle_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfle_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfle_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfle_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfle_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfle_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfle_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfle_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfle_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfle_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfle_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfle_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmfle_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfle_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfle_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfle_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vmfle_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmflt_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmflt_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmflt_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmflt_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmflt_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmflt_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmflt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmflt_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmflt_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmflt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmflt_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmflt_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmflt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return 
vmflt_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmflt_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmflt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmflt_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmflt_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmflt_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmflt_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t 
maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmflt_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmflt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmflt_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmflt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmflt_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmflt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmflt_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmflt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmflt_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmflt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmflt_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmflt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmflt_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmflt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmflt_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmflt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmflt_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmflt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmflt_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmflt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmflt_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmflt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmflt_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmflt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmflt_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmflt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmflt_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return 
vmflt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmflt_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmflt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmflt_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmflt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vmflt_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfne_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfne_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, 
_Float16 op2, size_t vl) { + return vmfne_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfne_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfne_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfne_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfne_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfne_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfne_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfne_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfne_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfne_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfne_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfne_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfne_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfne_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfne_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfne_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfne_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfne_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfne_vf_f32m1_b32_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfne_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfne_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfne_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfne_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfne_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfne_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfne_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfne_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t 
vl) { - return vmfne_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfne_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfne_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfne_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfne_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfne_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfne_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfne_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfne_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfne_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfne.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfne_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfne_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfne_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfne_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfne_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmfne_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfne_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfne_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfne_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vmfne_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmin.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmin_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmin_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmin_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmin_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmin_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmin_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmin_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmin_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmin_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmin_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmin_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin_vv_i16m1_m(mask, maskedoff, op1, op2, 
vl); +vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmin_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmin_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmin_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmin_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmin_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t 
test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmin_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmin_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmin_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmin_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmin_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmin_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmin_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmin_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmin_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmin_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmin_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmin_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmin_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return 
vmin_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmin_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmin_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmin_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmin_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return 
vmin_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmin_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vminu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vminu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vminu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vminu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vminu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return 
vminu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vminu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vminu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vminu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vminu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { 
- return vminu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vminu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vminu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vminu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vminu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vminu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vminu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vminu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vminu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vminu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vminu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vminu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vminu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vminu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vminu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + 
return vminu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vminu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vminu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vminu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vminu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, 
vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vminu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vminu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vminu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vminu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vminu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vminu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vminu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vminu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vminu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vminu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vminu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vminu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vminu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vminu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - 
return vminu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vminu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vminu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vminu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbf.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbf.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbf.c @@ -71,64 +71,64 @@ // CHECK-RV64-LABEL: @test_vmsbf_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return vmsbf_m_b1_m(mask, maskedoff, op1, vl); +vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { + return vmsbf_m_b1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return vmsbf_m_b2_m(mask, maskedoff, op1, vl); +vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { + return vmsbf_m_b2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return vmsbf_m_b4_m(mask, maskedoff, op1, vl); +vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return vmsbf_m_b4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return vmsbf_m_b8_m(mask, maskedoff, op1, vl); +vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return vmsbf_m_b8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return vmsbf_m_b16_m(mask, maskedoff, op1, vl); +vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return vmsbf_m_b16_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return vmsbf_m_b32_m(mask, maskedoff, op1, vl); +vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return vmsbf_m_b32_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return vmsbf_m_b64_m(mask, maskedoff, 
op1, vl); +vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return vmsbf_m_b64_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmseq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmseq.c @@ -801,793 +801,793 @@ // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmseq_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmseq_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmseq_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmseq_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmseq_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmseq_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmseq_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmseq_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmseq_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmseq_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmseq_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmseq_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmseq_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, 
vint8m2_t op2, size_t vl) { + return vmseq_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmseq_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmseq_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmseq_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmseq_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmseq_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmseq_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m8_b1_m(mask, 
maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmseq_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmseq_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmseq_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmseq_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmseq_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmseq_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmseq_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmseq_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmseq_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmseq_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmseq_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmseq_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmseq_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmseq_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmseq_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmseq_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmseq_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmseq_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmseq_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmseq_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmseq_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmseq_vx_i32mf2_b64_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmseq_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmseq_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmseq_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmseq_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmseq_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmseq_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - 
return vmseq_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmseq_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmseq_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmseq_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmseq_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmseq_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmseq_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmseq_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmseq_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmseq_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmseq_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmseq_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmseq_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmseq_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmseq_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmseq_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmseq_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmseq_vx_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmseq_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmseq_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmseq_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmseq_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmseq_vv_u8mf4_b32_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmseq_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmseq_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmseq_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmseq_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmseq_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmseq_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); 
+vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmseq_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmseq_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmseq_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmseq_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmseq_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmseq_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmseq_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t 
maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmseq_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmseq_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmseq_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmseq_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmseq_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmseq_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmseq_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmseq_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmseq_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmseq_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmseq_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmseq_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmseq_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmseq_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmseq_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmseq_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmseq_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmseq_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmseq_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmseq_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmseq_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmseq_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t 
test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmseq_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmseq_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmseq_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmseq_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmseq_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmseq_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmseq_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmseq_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmseq_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmseq_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmseq_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmseq_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmseq_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmseq_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmseq_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmseq_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmseq_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmseq_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmseq_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmseq_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmseq_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmseq_vv_u64m4_b16_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmseq_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmseq_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmseq_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmseq_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsge.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsge_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmsge_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsge.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmsge_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsge_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmsge_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsge_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsge_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsge_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return 
vmsge_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsge_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsge_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmsge_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsge_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsge_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmsge_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsge_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t 
test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsge_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmsge_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsge_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsge_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmsge_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsge_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmsge_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, 
vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsge_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsge_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmsge_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmsge_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsge_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsge_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmsge_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsge_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsge_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmsge_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsge_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsge_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmsge_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsge_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsge_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmsge_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsge_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsge_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsge_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsge_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsge_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t 
test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmsge_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsge_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmsge_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmsge_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsge_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsge_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmsge_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t 
test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsge_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsge_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmsge_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsge_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsge_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmsge_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsge_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsge_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmsge_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsge_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmsge_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmsge_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsge_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmsge_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmsge_vx_i64m8_b8_m(mask, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgeu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgeu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgeu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgeu.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgeu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmsgeu_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsgeu_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgeu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmsgeu_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsgeu_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgeu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsgeu_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsgeu_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgeu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsgeu_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsgeu_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsgeu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t 
vl) { + return vmsgeu_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsgeu_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgeu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsgeu_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsgeu_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgeu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsgeu_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - 
return vmsgeu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsgeu_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsgeu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsgeu_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsgeu_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgeu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsgeu_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsgeu_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsgeu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsgeu_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsgeu_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsgeu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsgeu_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsgeu_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgeu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsgeu_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsgeu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsgeu_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgeu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsgeu_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsgeu_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgeu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgeu_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) 
{ - return vmsgeu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgeu_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsgeu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsgeu_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsgeu_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgeu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsgeu_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsgeu_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsgeu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsgeu_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsgeu_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgeu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsgeu_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsgeu_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgeu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsgeu_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmsgeu_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsgeu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsgeu_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsgeu_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgeu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsgeu_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return 
vmsgeu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsgeu_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsgeu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmsgeu_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmsgeu_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgt.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsgt_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmsgt_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return 
vmsgt_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsgt_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmsgt_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsgt_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsgt_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsgt_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmsgt_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - 
return vmsgt_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsgt_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmsgt_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsgt_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsgt_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmsgt_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsgt_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsgt_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t 
test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmsgt_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsgt_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsgt_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmsgt_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsgt_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmsgt_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsgt_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsgt_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmsgt_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmsgt_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsgt_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsgt_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmsgt_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsgt_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsgt_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmsgt_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsgt_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsgt_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmsgt_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsgt_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsgt_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, 
vint16m8_t op1, int16_t op2, size_t vl) { + return vmsgt_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsgt_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsgt_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsgt_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsgt_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsgt_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmsgt_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t 
test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsgt_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmsgt_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmsgt_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsgt_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsgt_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmsgt_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsgt_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsgt_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmsgt_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsgt_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsgt_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmsgt_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsgt_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsgt_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmsgt_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsgt_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmsgt_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmsgt_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsgt_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmsgt_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmsgt_vx_i64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgtu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgtu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgtu.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgtu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmsgtu_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsgtu_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgtu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmsgtu_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsgtu_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgtu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsgtu_vv_u8mf2_b16_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsgtu_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgtu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsgtu_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsgtu_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsgtu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmsgtu_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, 
vl); +vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsgtu_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgtu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsgtu_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsgtu_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgtu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsgtu_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsgtu_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t 
test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsgtu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsgtu_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsgtu_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgtu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsgtu_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsgtu_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsgtu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsgtu_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsgtu_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsgtu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsgtu_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsgtu_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgtu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsgtu_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, 
vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsgtu_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgtu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsgtu_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsgtu_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgtu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgtu_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgtu_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsgtu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsgtu_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsgtu_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgtu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsgtu_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsgtu_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsgtu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsgtu_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsgtu_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgtu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsgtu_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsgtu_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgtu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsgtu_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, 
size_t vl) { + return vmsgtu_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsgtu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsgtu_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsgtu_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgtu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsgtu_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsgtu_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsgtu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsgtu_vv_u64m8_b8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsgtu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu_vx_u64m8_b8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsif.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsif.c
@@ -71,64 +71,64 @@
// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) {
- return vmsif_m_b1_m(mask, maskedoff, op1, vl);
+vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vmsif_m_b1_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) {
- return vmsif_m_b2_m(mask, maskedoff, op1, vl);
+vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vmsif_m_b2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) {
- return vmsif_m_b4_m(mask, maskedoff, op1, vl);
+vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vmsif_m_b4_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) {
- return vmsif_m_b8_m(mask, maskedoff, op1, vl);
+vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vmsif_m_b8_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) {
- return vmsif_m_b16_m(mask, maskedoff, op1, vl);
+vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vmsif_m_b16_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) {
- return vmsif_m_b32_m(mask, maskedoff, op1, vl);
+vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vmsif_m_b32_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) {
- return vmsif_m_b64_m(mask, maskedoff, op1, vl);
+vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vmsif_m_b64_m(mask, op1, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsle.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsle.c
@@ -405,397 +405,397 @@
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsle_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsle_vv_i8mf8_b64_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vmsle.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsle_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmsle_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsle_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsle_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return 
vmsle_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsle_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsle_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsle_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsle_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsle_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t 
test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsle_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsle_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsle_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsle_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmsle_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, 
vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsle_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmsle_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsle_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsle_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsle_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsle_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsle_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsle_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsle_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsle_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsle_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsle_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsle_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsle_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t 
test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsle_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmsle_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsle_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsle_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t 
test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsle_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsle_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsle_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsle_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmsle_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsle_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsle_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmsle_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m2_b32_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmsle_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsle_vv_i64m4_b16_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmsle_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m4_b16_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmsle_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsle_vv_i64m8_b8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmsle_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m8_b8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsleu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsleu.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsleu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsleu.c
@@ -405,397 +405,397 @@
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vmsleu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmsleu_vv_u8mf8_b64_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vmsleu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8mf8_b64_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vmsleu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmsleu_vv_u8mf4_b32_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vmsleu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8mf4_b32_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsleu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsleu_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsleu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsleu_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsleu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t 
vl) { + return vmsleu_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsleu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsleu_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsleu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsleu_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - 
return vmsleu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsleu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsleu_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsleu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsleu_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsleu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsleu_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsleu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsleu_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsleu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsleu_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsleu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsleu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsleu_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsleu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsleu_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) 
{ - return vmsleu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsleu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsleu_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsleu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsleu_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsleu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsleu_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsleu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsleu_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsleu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsleu_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmsleu_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsleu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsleu_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsleu_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsleu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsleu_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return 
vmsleu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsleu_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsleu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmsleu_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmsleu_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmslt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmslt.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmslt_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmslt_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return 
vmslt_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmslt_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmslt_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmslt_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmslt_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmslt_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmslt_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - 
return vmslt_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmslt_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmslt_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmslt_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmslt_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmslt_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmslt_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmslt_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t 
test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmslt_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmslt_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmslt_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmslt_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmslt_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmslt_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmslt_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmslt_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmslt_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmslt_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmslt_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmslt_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmslt_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmslt_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmslt_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmslt_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmslt_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmslt_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmslt_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmslt_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmslt_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, 
vint16m8_t op1, int16_t op2, size_t vl) { + return vmslt_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmslt_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmslt_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmslt_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmslt_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmslt_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmslt_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t 
test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmslt_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmslt_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmslt_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmslt_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmslt_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmslt_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmslt_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmslt_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmslt_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmslt_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmslt_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmslt_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmslt_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmslt_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmslt_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmslt_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmslt_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmslt_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmslt_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmslt_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmslt_vx_i64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsltu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsltu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsltu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsltu.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsltu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmsltu_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsltu_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsltu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmsltu_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsltu_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsltu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsltu_vv_u8mf2_b16_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsltu_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsltu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsltu_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsltu_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsltu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmsltu_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, 
vl); +vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsltu_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsltu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsltu_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsltu_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsltu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsltu_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsltu_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t 
test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsltu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsltu_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsltu_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsltu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsltu_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsltu_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsltu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsltu_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsltu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsltu_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsltu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsltu_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsltu_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsltu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsltu_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, 
vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsltu_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsltu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsltu_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsltu_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsltu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsltu_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsltu_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsltu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsltu_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsltu_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsltu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsltu_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsltu_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsltu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsltu_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsltu_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsltu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsltu_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsltu_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsltu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsltu_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, 
size_t vl) { + return vmsltu_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsltu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsltu_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsltu_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsltu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsltu_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsltu_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vmsltu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsltu_vv_u64m8_b8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmsltu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsltu_vx_u64m8_b8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsne.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsne.c
@@ -801,793 +801,793 @@
// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmsne_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsne_vv_i8mf8_b64_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmsne_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8mf8_b64_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmsne_vv_i8mf4_b32_m(mask, maskedoff, op1, op2,
vl); +vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmsne_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsne_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsne_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsne_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmsne_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsne_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsne_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmsne_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsne_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsne_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmsne_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsne_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsne_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmsne_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsne_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsne_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmsne_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsne_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmsne_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsne_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsne_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmsne_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmsne_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsne_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsne_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmsne_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsne_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsne_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return 
vmsne_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsne_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsne_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmsne_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsne_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsne_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmsne_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t 
vl) { - return vmsne_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsne_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsne_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsne_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsne_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmsne_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsne_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmsne_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( 
poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmsne_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsne_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsne_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmsne_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsne_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsne_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmsne_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsne_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsne_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmsne_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsne_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsne_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmsne_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsne_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, 
vint64m4_t op2, size_t vl) { + return vmsne_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmsne_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsne_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmsne_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmsne_vx_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsne_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmsne_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, 
vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsne_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsne_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmsne_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsne_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsne_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsne_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsne_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsne_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsne_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsne_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsne_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmsne_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsne_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsne_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsne_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsne_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsne_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsne_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsne_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsne_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsne_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsne_vx_u16mf4_b64_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsne_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsne_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsne_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsne_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsne_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsne_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, 
vuint16m2_t op2, size_t vl) { - return vmsne_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsne_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsne_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsne_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsne_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsne_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsne_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsne_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( poison, 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsne_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsne_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsne_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsne_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsne_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsne_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsne_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsne_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsne_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsne_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsne_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsne_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsne_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsne_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, 
vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsne_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsne_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsne_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsne_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmsne_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsne_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsne_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t 
test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsne_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsne_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsne_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsne_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsne_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmsne_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmsne_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsof.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsof.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsof.c @@ -71,64 +71,64 @@ // CHECK-RV64-LABEL: @test_vmsof_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return vmsof_m_b1_m(mask, maskedoff, op1, vl); +vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { + return vmsof_m_b1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return vmsof_m_b2_m(mask, maskedoff, op1, vl); +vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { + return vmsof_m_b2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return vmsof_m_b4_m(mask, maskedoff, op1, vl); +vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return vmsof_m_b4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return vmsof_m_b8_m(mask, maskedoff, op1, vl); +vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return vmsof_m_b8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return vmsof_m_b16_m(mask, maskedoff, op1, vl); +vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return vmsof_m_b16_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmsof.mask.nxv2i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return vmsof_m_b32_m(mask, maskedoff, op1, vl); +vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return vmsof_m_b32_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return vmsof_m_b64_m(mask, maskedoff, op1, vl); +vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return vmsof_m_b64_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmul.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmul_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, 
vint8mf4_t op2, size_t vl) { + return vmul_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmul_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmul_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return 
vmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmul_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmul_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, 
vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmul_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmul_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmul_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( 
poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmul_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmul_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmul_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmul_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_vv_i32mf2_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmul_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmul_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m2_m(mask, 
maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmul_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmul_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmul_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmul_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmul_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmul_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmul_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmul_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmul_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return 
vmul_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmul_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmul_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmul_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m4_m(mask, maskedoff, op1, op2, vl); 
+vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmul_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmul_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmul_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmul_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmul_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmul_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmul_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmul_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmul_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmul_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmul_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmul_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmul_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return 
vmul_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmul_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmul_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmul_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmul_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmul_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmul_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmul_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmul_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmul_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmul_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmul_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmul_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmul_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulh.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulh.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulh.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmulh_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmulh_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmulh_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmulh_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmulh_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmulh_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmulh_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return 
vmulh_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmulh_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmulh_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t 
maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmulh_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmulh_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmulh_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmulh_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmulh_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmulh_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t 
test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmulh_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmulh_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmulh_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmulh_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmulh_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhsu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhsu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhsu_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhsu_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhsu_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhsu_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhsu_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhsu_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, 
vuint8m8_t op2, size_t vl) { + return vmulhsu_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhsu_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhsu_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhsu_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhsu_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhsu_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhsu_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu_vv_i32mf2_m(mask, maskedoff, op1, 
op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhsu_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhsu_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( 
poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhsu_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhsu_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhsu_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhsu_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return 
vmulhsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhsu_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhsu_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, 
vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulhu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t 
maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return 
vmulhu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t 
test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, 
size_t vl) { + return vmulhu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclip_wv_i8mf8_m(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnclip_wv_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf8_m(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclip_wv_i8mf4_m(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnclip_wv_i8mf4_m(mask, 
op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf4_m(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclip_wv_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnclip_wv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclip_wv_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclip_wv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclip_wv_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclip_wv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclip_wv_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclip_wv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclip_wv_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { + return vnclip_wv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclip_wv_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { + return vnclip_wv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t 
mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclip_wv_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnclip_wv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclip_wv_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnclip_wv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclip_wv_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnclip_wv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclip_wv_i32mf2_m(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_wv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32mf2_m(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclip_wv_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnclip_wv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t 
maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclip_wv_i32m2_m(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnclip_wv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m2_m(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclip_wv_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnclip_wv_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32m4_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclipu_wv_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnclipu_wv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclipu_wv_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnclipu_wv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclipu_wv_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnclipu_wv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclipu_wv_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclipu_wv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t 
test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclipu_wv_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclipu_wv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclipu_wv_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclipu_wv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclipu_wv_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { + return vnclipu_wv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclipu_wv_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { + return vnclipu_wv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclipu_wv_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnclipu_wv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclipu_wv_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnclipu_wv_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclipu_wv_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnclipu_wv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16m4_m(mask, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclipu_wv_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_wv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclipu_wv_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnclipu_wv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclipu_wv_u32m2_m(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnclipu_wv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m2_m(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclipu_wv_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnclipu_wv_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32m4_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vncvt.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( poison, 
[[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return vncvt_x_x_w_i8mf8_m(mask, maskedoff, src, vl); +vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint16mf4_t src, size_t vl) { + return vncvt_x_x_w_i8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return vncvt_x_x_w_i8mf4_m(mask, maskedoff, src, vl); +vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint16mf2_t src, size_t vl) { + return vncvt_x_x_w_i8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return vncvt_x_x_w_i8mf2_m(mask, maskedoff, src, vl); +vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint16m1_t src, size_t vl) { + return vncvt_x_x_w_i8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return vncvt_x_x_w_i8m1_m(mask, maskedoff, src, vl); +vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint16m2_t src, size_t vl) { + return vncvt_x_x_w_i8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return vncvt_x_x_w_i8m2_m(mask, maskedoff, src, vl); +vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint16m4_t src, size_t vl) { + return vncvt_x_x_w_i8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return vncvt_x_x_w_i8m4_m(mask, maskedoff, src, vl); +vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint16m8_t src, size_t vl) { + return vncvt_x_x_w_i8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return vncvt_x_x_w_u8mf8_m(mask, maskedoff, src, vl); +vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { + return vncvt_x_x_w_u8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return vncvt_x_x_w_u8mf4_m(mask, maskedoff, src, vl); +vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { + return vncvt_x_x_w_u8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return vncvt_x_x_w_u8mf2_m(mask, maskedoff, src, vl); +vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { + return vncvt_x_x_w_u8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return vncvt_x_x_w_u8m1_m(mask, maskedoff, src, vl); +vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint16m2_t src, size_t vl) { + return vncvt_x_x_w_u8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return vncvt_x_x_w_u8m2_m(mask, maskedoff, src, vl); +vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint16m4_t src, size_t vl) { + return vncvt_x_x_w_u8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return vncvt_x_x_w_u8m4_m(mask, maskedoff, src, vl); +vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint16m8_t src, size_t vl) { + return vncvt_x_x_w_u8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vncvt_x_x_w_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vncvt_x_x_w_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vncvt_x_x_w_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vncvt_x_x_w_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vncvt_x_x_w_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vncvt_x_x_w_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vncvt_x_x_w_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vncvt_x_x_w_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vncvt_x_x_w_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { + return vncvt_x_x_w_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vncvt_x_x_w_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vncvt_x_x_w_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vncvt_x_x_w_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vncvt_x_x_w_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vncvt_x_x_w_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vncvt_x_x_w_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vncvt_x_x_w_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vncvt_x_x_w_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vncvt_x_x_w_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) { + return vncvt_x_x_w_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vncvt_x_x_w_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) { + return vncvt_x_x_w_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vncvt_x_x_w_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { + return vncvt_x_x_w_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vncvt_x_x_w_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { + return vncvt_x_x_w_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vncvt_x_x_w_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) { + return vncvt_x_x_w_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vncvt_x_x_w_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) { + return vncvt_x_x_w_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vncvt_x_x_w_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) { + return vncvt_x_x_w_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vncvt_x_x_w_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) { + return vncvt_x_x_w_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vncvt_x_x_w_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) { + return 
vncvt_x_x_w_u32m4_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vneg.c @@ -207,199 +207,199 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vneg_v_i8mf8_m(mask, maskedoff, op1, vl); +vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vneg_v_i8mf8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vneg_v_i8mf4_m(mask, maskedoff, op1, vl); +vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vneg_v_i8mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vneg_v_i8mf2_m(mask, maskedoff, op1, vl); +vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vneg_v_i8mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vneg_v_i8m1_m(mask, maskedoff, op1, vl); +vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vneg_v_i8m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t 
test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vneg_v_i8m2_m(mask, maskedoff, op1, vl); +vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { + return vneg_v_i8m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vneg_v_i8m4_m(mask, maskedoff, op1, vl); +vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { + return vneg_v_i8m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vneg_v_i8m8_m(mask, maskedoff, op1, vl); +vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { + return vneg_v_i8m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vneg_v_i16mf4_m(mask, maskedoff, op1, vl); +vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { + return vneg_v_i16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vneg_v_i16mf2_m(mask, maskedoff, op1, vl); +vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { + return vneg_v_i16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vneg_v_i16m1_m(mask, maskedoff, op1, vl); +vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, 
vint16m1_t op1, size_t vl) { + return vneg_v_i16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vneg_v_i16m2_m(mask, maskedoff, op1, vl); +vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { + return vneg_v_i16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vneg_v_i16m4_m(mask, maskedoff, op1, vl); +vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { + return vneg_v_i16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vneg_v_i16m8_m(mask, maskedoff, op1, vl); +vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { + return vneg_v_i16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vneg_v_i32mf2_m(mask, maskedoff, op1, vl); +vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vneg_v_i32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vneg_v_i32m1_m(mask, maskedoff, op1, vl); +vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { + return vneg_v_i32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vneg_v_i32m2_m(mask, maskedoff, op1, vl); +vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { + return vneg_v_i32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vneg_v_i32m4_m(mask, maskedoff, op1, vl); +vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { + return vneg_v_i32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vneg_v_i32m8_m(mask, maskedoff, op1, vl); +vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { + return vneg_v_i32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vneg_v_i64m1_m(mask, maskedoff, op1, vl); +vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) { + return vneg_v_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vneg_v_i64m2_m(mask, maskedoff, op1, vl); +vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { + return vneg_v_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vneg_v_i64m4_m(mask, maskedoff, op1, vl); +vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { + return vneg_v_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vneg_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vneg_v_i64m8_m(mask, maskedoff, op1, vl); +vint64m8_t test_vneg_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) { + return vneg_v_i64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( 
[[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[VD:%.*]], 
i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, 
vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, 
vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, 
vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, 
vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { 
@@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ 
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ 
-837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 
[[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( 
[[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t 
mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnot.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnot.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vnot_v_i8mf8_m(mask, maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vnot_v_i8mf8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vnot_v_i8mf4_m(mask, maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vnot_v_i8mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vnot_v_i8mf2_m(mask, maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vnot_v_i8mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vnot_v_i8m1_m(mask, maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vnot_v_i8m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vnot_v_i8m2_m(mask, maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { + return vnot_v_i8m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vnot_v_i8m4_m(mask, maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { + return vnot_v_i8m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vnot_v_i8m8_m(mask, maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { + return vnot_v_i8m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vnot_v_i16mf4_m(mask, maskedoff, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { + return vnot_v_i16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( poison, 
[[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vnot_v_i16mf2_m(mask, maskedoff, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { + return vnot_v_i16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vnot_v_i16m1_m(mask, maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) { + return vnot_v_i16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vnot_v_i16m2_m(mask, maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { + return vnot_v_i16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vnot_v_i16m4_m(mask, maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { + return vnot_v_i16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vnot_v_i16m8_m(mask, maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { + return vnot_v_i16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t 
maskedoff, vint32mf2_t op1, size_t vl) { - return vnot_v_i32mf2_m(mask, maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vnot_v_i32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vnot_v_i32m1_m(mask, maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { + return vnot_v_i32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vnot_v_i32m2_m(mask, maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { + return vnot_v_i32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vnot_v_i32m4_m(mask, maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { + return vnot_v_i32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vnot_v_i32m8_m(mask, maskedoff, op1, vl); +vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { + return vnot_v_i32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vnot_v_i64m1_m(mask, maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t op1, 
size_t vl) { + return vnot_v_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vnot_v_i64m2_m(mask, maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { + return vnot_v_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vnot_v_i64m4_m(mask, maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { + return vnot_v_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vnot_v_i64m8_m(mask, maskedoff, op1, vl); +vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) { + return vnot_v_i64m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vnot_v_u8mf8_m(mask, maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { + return vnot_v_u8mf8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vnot_v_u8mf4_m(mask, maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { + return vnot_v_u8mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vnot_v_u8mf2_m(mask, maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { + return vnot_v_u8mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return vnot_v_u8m1_m(mask, maskedoff, op1, vl); +vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { + return vnot_v_u8m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return vnot_v_u8m2_m(mask, maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { + return vnot_v_u8m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return vnot_v_u8m4_m(mask, maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t vl) { + return vnot_v_u8m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return vnot_v_u8m8_m(mask, maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t vl) { + return vnot_v_u8m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( poison, 
[[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vnot_v_u16mf4_m(mask, maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { + return vnot_v_u16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vnot_v_u16mf2_m(mask, maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { + return vnot_v_u16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return vnot_v_u16m1_m(mask, maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { + return vnot_v_u16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return vnot_v_u16m2_m(mask, maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { + return vnot_v_u16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return vnot_v_u16m4_m(mask, maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t vl) { + return vnot_v_u16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t 
test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return vnot_v_u16m8_m(mask, maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t vl) { + return vnot_v_u16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vnot_v_u32mf2_m(mask, maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) { + return vnot_v_u32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return vnot_v_u32m1_m(mask, maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { + return vnot_v_u32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return vnot_v_u32m2_m(mask, maskedoff, op1, vl); +vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { + return vnot_v_u32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return vnot_v_u32m4_m(mask, maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t vl) { + return vnot_v_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return vnot_v_u32m8_m(mask, maskedoff, 
op1, vl); +vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t vl) { + return vnot_v_u32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return vnot_v_u64m1_m(mask, maskedoff, op1, vl); +vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t vl) { + return vnot_v_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return vnot_v_u64m2_m(mask, maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t vl) { + return vnot_v_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return vnot_v_u64m4_m(mask, maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t vl) { + return vnot_v_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vnot_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return vnot_v_u64m8_m(mask, maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t vl) { + return vnot_v_u64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsra.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsra_wv_i8mf8_m(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnsra_wv_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf8_m(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vnsra_wx_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsra_wv_i8mf4_m(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnsra_wv_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf4_m(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return vnsra_wx_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsra_wv_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return 
vnsra_wv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vnsra_wx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsra_wv_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnsra_wv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vnsra_wx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsra_wv_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnsra_wv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vnsra_wx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsra_wv_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnsra_wv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vnsra_wx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsra_wv_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { + return vnsra_wv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vnsra_wx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsra_wv_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { + return vnsra_wv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vnsra_wx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsra_wv_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnsra_wv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { + return vnsra_wx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t 
shift, size_t vl) { - return vnsra_wv_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnsra_wv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vnsra_wx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsra_wv_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnsra_wv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vnsra_wx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra_wv_i32mf2_m(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_wv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32mf2_m(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra_wv_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnsra_wv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsra_wv_i32m2_m(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnsra_wv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m2_m(mask, maskedoff, op1, shift, vl); +vint32m2_t 
test_vnsra_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsra_wv_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnsra_wv_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32m4_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsrl.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsrl_wv_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnsrl_wv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, 
vuint16mf4_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsrl_wv_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnsrl_wv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsrl_wv_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnsrl_wv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsrl_wv_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnsrl_wv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsrl_wv_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnsrl_wv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsrl_wv_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnsrl_wv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsrl_wv_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { + return vnsrl_wv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsrl_wv_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { + return vnsrl_wv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsrl_wv_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnsrl_wv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsrl_wv_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnsrl_wv_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsrl_wv_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnsrl_wv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsrl_wv_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_wv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t 
maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsrl_wv_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnsrl_wv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsrl_wv_u32m2_m(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnsrl_wv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m2_m(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsrl_wv_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnsrl_wv_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32m4_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vor.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vor_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vor_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vor_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vor_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vor_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vor_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vor_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vor_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vor_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vor_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vor_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vor_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vor_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vor_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vor_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vor_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vor_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vor_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vor_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vor_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vor_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vor_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vor_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return 
vor_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vor_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vor_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return 
vor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vor_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vor_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vor_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vor_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vor_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vor_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vor_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vor_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vor_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vor_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( 
poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vor_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vor_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vor_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vor_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vor_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vor_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vor_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vor_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vor_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vor_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vor_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m1_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vor_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vor_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vor_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vor_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, 
vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vor_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vor_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vor_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, 
vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vor_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vor_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vor_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vor_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( 
poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vor_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vor_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vor_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vor_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vor_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vor_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vor_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vor_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m2_m(mask, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vor_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vor_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vor_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vor_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vor_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vor_vv_u64m1_m(mask, maskedoff, op1, op2, 
vl); +vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vor_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vor_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vor_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vor_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vor_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vor_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vor_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t 
test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vor_vv_u64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c
@@ -404,397 +404,397 @@
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return vredand_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+ return vredand_vs_i8mf8_i8m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-
return vredand_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t 
test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return 
vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t 
vector, vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredand_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c
@@ -206,199 +206,199 @@
// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
- return
vredmax_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, 
vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c @@ -206,199 +206,199 @@ // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf2_u16m1_m(mask, 
maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmaxu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c @@ -206,199 +206,199 @@ // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, 
vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t 
test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, 
size_t vl) { + return vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c @@ -206,199 +206,199 @@ // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf2_u8m1_m(mask, 
maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t 
maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, 
size_t vl) { + return vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf4_i8m1_m(mask, maskedoff, vector, 
scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, 
vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( poison, 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t 
test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t 
maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m1_u32m1_m(mask, 
vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t 
test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return 
vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, 
vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( 
poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m1_u16m1_m(mask, 
maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredsum_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredsum_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
@@ -404,397 +404,397 @@
// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t 
test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return 
vredxor_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, 
vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); 
} // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return 
vredxor_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredxor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, 
vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrem.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrem.c
@@ -404,397 +404,397 @@
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vrem_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem_vv_i8mf8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrem_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vrem_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vrem_vv_i8mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrem_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask,
vint8mf4_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vrem_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vrem_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vrem_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vrem_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return 
vrem_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vrem_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vrem_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vrem_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vrem_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vrem_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, 
vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vrem_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vrem_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vrem_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vrem_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vrem_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vrem_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vrem_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vrem_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vrem_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vrem_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vrem_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vrem_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vrem_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t 
vl) { + return vrem_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vrem_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vrem_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vrem_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vrem_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t 
vl) { - return vrem_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vrem_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vrem_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vrem_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vrem_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vrem_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vrem_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vrem_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vrem_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vrem_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vrem_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vrem_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vremu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vremu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vremu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vremu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vremu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vremu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vremu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vremu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vremu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vremu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vremu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vremu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vremu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vremu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vremu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vremu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vremu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vremu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vremu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t 
test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vremu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vremu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vremu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vremu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vremu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vremu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vremu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vremu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vremu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t 
op2, size_t vl) { + return vremu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vremu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vremu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t 
test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vremu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vremu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vremu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vremu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vremu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vremu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c @@ -1071,1063 +1071,1063 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f16.i64( poison, [[OP1:%.*]], 
[[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_f16mf4_m(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather_vv_f16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf4_m(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t index, size_t vl) { + return vrgather_vx_f16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_f16mf2_m(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather_vv_f16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf2_m(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_f16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_f16m1_m(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t index, 
size_t vl) { + return vrgather_vv_f16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m1_m(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t index, size_t vl) { + return vrgather_vx_f16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_f16m2_m(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather_vv_f16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m2_m(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t index, size_t vl) { + return vrgather_vx_f16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_f16m4_m(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather_vv_f16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f16.i64( poison, [[OP1:%.*]], i64 
[[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m4_m(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t index, size_t vl) { + return vrgather_vx_f16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_f16m8_m(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather_vv_f16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m8_m(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t index, size_t vl) { + return vrgather_vx_f16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_f32mf2_m(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_f32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32mf2_m(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) { + return 
vrgather_vx_f32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_f32m1_m(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { + return vrgather_vv_f32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m1_m(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t index, size_t vl) { + return vrgather_vx_f32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_f32m2_m(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { + return vrgather_vv_f32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m2_m(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t index, size_t vl) { + return vrgather_vx_f32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_f32m4_m(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { + return vrgather_vv_f32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m4_m(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t index, size_t vl) { + return vrgather_vx_f32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_f32m8_m(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { + return vrgather_vv_f32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m8_m(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t index, size_t vl) { + return vrgather_vx_f32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_f64m1_m(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { + return vrgather_vv_f64m1_m(mask, op1, index, vl); 
} // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m1_m(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t index, size_t vl) { + return vrgather_vx_f64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_f64m2_m(mask, maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { + return vrgather_vv_f64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m2_m(mask, maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t index, size_t vl) { + return vrgather_vx_f64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_f64m4_m(mask, maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { + return vrgather_vv_f64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m4_m(mask, maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t index, size_t vl) { + return vrgather_vx_f64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_f64m8_m(mask, maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { + return vrgather_vv_f64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m8_m(mask, maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t index, size_t vl) { + return vrgather_vx_f64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_i8mf8_m(mask, maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { + return vrgather_vv_i8mf8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf8_m(mask, maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t index, size_t vl) { + return vrgather_vx_i8mf8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_i8mf4_m(mask, maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { + return vrgather_vv_i8mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf4_m(mask, maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t index, size_t vl) { + return vrgather_vx_i8mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_i8mf2_m(mask, maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { + return vrgather_vv_i8mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf2_m(mask, maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_i8mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, 
size_t vl) { - return vrgather_vv_i8m1_m(mask, maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t index, size_t vl) { + return vrgather_vv_i8m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m1_m(mask, maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t index, size_t vl) { + return vrgather_vx_i8m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_i8m2_m(mask, maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t index, size_t vl) { + return vrgather_vv_i8m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m2_m(mask, maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t index, size_t vl) { + return vrgather_vx_i8m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_i8m4_m(mask, maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t index, size_t vl) { + return vrgather_vv_i8m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.mask.nxv32i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m4_m(mask, maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t index, size_t vl) { + return vrgather_vx_i8m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_i8m8_m(mask, maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t index, size_t vl) { + return vrgather_vv_i8m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m8_m(mask, maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t index, size_t vl) { + return vrgather_vx_i8m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_i16mf4_m(mask, maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather_vv_i16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf4_m(mask, maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t index, size_t vl) { + return vrgather_vx_i16mf4_m(mask, op1, 
index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_i16mf2_m(mask, maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather_vv_i16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf2_m(mask, maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_i16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_i16m1_m(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather_vv_i16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m1_m(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t index, size_t vl) { + return vrgather_vx_i16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_i16m2_m(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather_vv_i16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m2_m(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t index, size_t vl) { + return vrgather_vx_i16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_i16m4_m(mask, maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather_vv_i16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m4_m(mask, maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t index, size_t vl) { + return vrgather_vx_i16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_i16m8_m(mask, maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather_vv_i16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m8_m(mask, maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t index, size_t vl) { + return vrgather_vx_i16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_i32mf2_m(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_i32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32mf2_m(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_i32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_i32m1_m(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t index, size_t vl) { + return vrgather_vv_i32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t 
index, size_t vl) { - return vrgather_vx_i32m1_m(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t index, size_t vl) { + return vrgather_vx_i32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_i32m2_m(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t index, size_t vl) { + return vrgather_vv_i32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m2_m(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t index, size_t vl) { + return vrgather_vx_i32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_i32m4_m(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t index, size_t vl) { + return vrgather_vv_i32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m4_m(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t index, size_t vl) { + return vrgather_vx_i32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_i32m8_m(mask, maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t index, size_t vl) { + return vrgather_vv_i32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m8_m(mask, maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t index, size_t vl) { + return vrgather_vx_i32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_i64m1_m(mask, maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t index, size_t vl) { + return vrgather_vv_i64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m1_m(mask, maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t index, size_t vl) { + return vrgather_vx_i64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_i64m2_m(mask, maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, 
vint64m2_t op1, vuint64m2_t index, size_t vl) { + return vrgather_vv_i64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m2_m(mask, maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t index, size_t vl) { + return vrgather_vx_i64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_i64m4_m(mask, maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t index, size_t vl) { + return vrgather_vv_i64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m4_m(mask, maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t index, size_t vl) { + return vrgather_vx_i64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_i64m8_m(mask, maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t index, size_t vl) { + return vrgather_vv_i64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( poison, [[OP1:%.*]], i64 
[[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m8_m(mask, maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t index, size_t vl) { + return vrgather_vx_i64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_u8mf8_m(mask, maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { + return vrgather_vv_u8mf8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf8_m(mask, maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t index, size_t vl) { + return vrgather_vx_u8mf8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_u8mf4_m(mask, maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { + return vrgather_vv_u8mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf4_m(mask, maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t index, size_t vl) { + return vrgather_vx_u8mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: 
@test_vrgather_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_u8mf2_m(mask, maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { + return vrgather_vv_u8mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf2_m(mask, maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_u8mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_u8m1_m(mask, maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t index, size_t vl) { + return vrgather_vv_u8m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m1_m(mask, maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t index, size_t vl) { + return vrgather_vx_u8m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, 
vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_u8m2_m(mask, maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t index, size_t vl) { + return vrgather_vv_u8m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m2_m(mask, maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t index, size_t vl) { + return vrgather_vx_u8m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_u8m4_m(mask, maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t index, size_t vl) { + return vrgather_vv_u8m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m4_m(mask, maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t index, size_t vl) { + return vrgather_vx_u8m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_u8m8_m(mask, maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t index, size_t vl) { + return vrgather_vv_u8m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m8_m(mask, maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t index, size_t vl) { + return vrgather_vx_u8m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_u16mf4_m(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather_vv_u16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf4_m(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t index, size_t vl) { + return vrgather_vx_u16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_u16mf2_m(mask, maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather_vv_u16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf2_m(mask, maskedoff, op1, 
index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_u16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_u16m1_m(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather_vv_u16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m1_m(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t index, size_t vl) { + return vrgather_vx_u16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_u16m2_m(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather_vv_u16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m2_m(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t index, size_t vl) { + return vrgather_vx_u16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_u16m4_m(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather_vv_u16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m4_m(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t index, size_t vl) { + return vrgather_vx_u16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_u16m8_m(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather_vv_u16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m8_m(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t index, size_t vl) { + return vrgather_vx_u16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_u32mf2_m(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t 
op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_u32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32mf2_m(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_u32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_u32m1_m(mask, maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t index, size_t vl) { + return vrgather_vv_u32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m1_m(mask, maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t index, size_t vl) { + return vrgather_vx_u32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_u32m2_m(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t index, size_t vl) { + return vrgather_vv_u32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( 
poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m2_m(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t index, size_t vl) { + return vrgather_vx_u32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_u32m4_m(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t index, size_t vl) { + return vrgather_vv_u32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m4_m(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t index, size_t vl) { + return vrgather_vx_u32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_u32m8_m(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t index, size_t vl) { + return vrgather_vv_u32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m8_m(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t index, size_t vl) { + return vrgather_vx_u32m8_m(mask, op1, index, 
vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_u64m1_m(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t index, size_t vl) { + return vrgather_vv_u64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m1_m(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t index, size_t vl) { + return vrgather_vx_u64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_u64m2_m(mask, maskedoff, op1, index, vl); +vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t index, size_t vl) { + return vrgather_vv_u64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m2_m(mask, maskedoff, op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t index, size_t vl) { + return vrgather_vx_u64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_u64m4_m(mask, maskedoff, op1, index, vl); +vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t index, size_t vl) { + return vrgather_vv_u64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m4_m(mask, maskedoff, op1, index, vl); +vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t index, size_t vl) { + return vrgather_vx_u64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_u64m8_m(mask, maskedoff, op1, index, vl); +vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t index, size_t vl) { + return vrgather_vv_u64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m8_m(mask, maskedoff, op1, index, vl); +vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t index, size_t vl) { + return vrgather_vx_u64m8_m(mask, op1, index, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgatherei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgatherei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgatherei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgatherei16.c @@ -522,514 +522,514 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { + return 
vrgatherei16_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f64m4_m(mask, maskedoff, op1, op2, vl); 
+vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i16mf2_m(mask, maskedoff, 
op1, op2, vl); +vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return 
vrgatherei16_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, 
size_t vl) { - return vrgatherei16_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrsub.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + 
return vrsub_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, 
size_t vl) { - return vrsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t 
test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m8_m(mask, op1, 
op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsadd_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsadd_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsadd_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsadd_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsadd_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsadd_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsadd_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { 
+ return vsadd_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsadd_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsadd_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t 
mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsadd_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsadd_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsadd_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsadd_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsadd_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsadd_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t 
test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsadd_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsadd_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsadd_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsadd_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsadd_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsadd_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsaddu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsaddu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsaddu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsaddu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsaddu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsaddu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m4_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsaddu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vsaddu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, 
vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vsaddu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vsaddu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vsaddu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vsaddu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vsaddu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m8_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsaddu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t 
test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsaddu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsaddu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsaddu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsaddu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsaddu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsaddu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t 
vl) { + return vsaddu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vsaddu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsaddu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vsaddu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsaddu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c @@ -260,253 +260,253 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf2_i16mf4_m(mask, maskedoff, op1, vl); +vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vsext_vf2_i16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf2_i16mf2_m(mask, maskedoff, op1, vl); +vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vsext_vf2_i16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf2_i16m1_m(mask, maskedoff, op1, vl); +vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vsext_vf2_i16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf2_i16m2_m(mask, maskedoff, op1, vl); +vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vsext_vf2_i16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf2_i16m4_m(mask, maskedoff, op1, vl); +vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint8m2_t op1, size_t vl) { + return vsext_vf2_i16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) { - return vsext_vf2_i16m8_m(mask, maskedoff, op1, vl); +vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint8m4_t op1, size_t vl) { + return vsext_vf2_i16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf4_i32mf2_m(mask, maskedoff, op1, vl); +vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vsext_vf4_i32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf4_i32m1_m(mask, maskedoff, op1, vl); +vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vsext_vf4_i32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf4_i32m2_m(mask, maskedoff, op1, vl); +vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vsext_vf4_i32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf4_i32m4_m(mask, maskedoff, op1, vl); +vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vsext_vf4_i32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf4_i32m8_m(mask, maskedoff, op1, vl); +vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) { + return vsext_vf4_i32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf8_i64m1_m(mask, maskedoff, op1, vl); +vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vsext_vf8_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf8_i64m2_m(mask, maskedoff, op1, vl); +vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vsext_vf8_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf8_i64m4_m(mask, maskedoff, op1, vl); +vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vsext_vf8_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf8_i64m8_m(mask, maskedoff, op1, vl); +vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vsext_vf8_i64m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf2_i32mf2_m(mask, maskedoff, op1, vl); +vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { + return vsext_vf2_i32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf2_i32m1_m(mask, maskedoff, op1, vl); +vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { + return vsext_vf2_i32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf2_i32m2_m(mask, maskedoff, op1, vl); +vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint16m1_t op1, size_t vl) { + return vsext_vf2_i32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf2_i32m4_m(mask, maskedoff, op1, vl); +vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint16m2_t op1, size_t vl) { + return vsext_vf2_i32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) { - return vsext_vf2_i32m8_m(mask, maskedoff, op1, vl); +vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint16m4_t op1, size_t vl) { + return vsext_vf2_i32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t 
vl) { - return vsext_vf4_i64m1_m(mask, maskedoff, op1, vl); +vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { + return vsext_vf4_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf4_i64m2_m(mask, maskedoff, op1, vl); +vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { + return vsext_vf4_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf4_i64m4_m(mask, maskedoff, op1, vl); +vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) { + return vsext_vf4_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf4_i64m8_m(mask, maskedoff, op1, vl); +vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) { + return vsext_vf4_i64m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) { - return vsext_vf2_i64m1_m(mask, maskedoff, op1, vl); +vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vsext_vf2_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) { - return vsext_vf2_i64m2_m(mask, maskedoff, op1, vl); +vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, 
vint32m1_t op1, size_t vl) { + return vsext_vf2_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) { - return vsext_vf2_i64m4_m(mask, maskedoff, op1, vl); +vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint32m2_t op1, size_t vl) { + return vsext_vf2_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) { - return vsext_vf2_i64m8_m(mask, maskedoff, op1, vl); +vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint32m4_t op1, size_t vl) { + return vsext_vf2_i64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1down.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf8_m(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) { + return vslide1down_vx_i8mf8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf4_m(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) { + return vslide1down_vx_i8mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf2_m(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) { + return vslide1down_vx_i8mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m1_m(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { + return vslide1down_vx_i8m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m2_m(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { + return vslide1down_vx_i8m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m4_m(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { + return vslide1down_vx_i8m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, 
vint8m8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m8_m(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { + return vslide1down_vx_i8m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf4_m(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) { + return vslide1down_vx_i16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf2_m(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) { + return vslide1down_vx_i16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m1_m(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { + return vslide1down_vx_i16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m2_m(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { + return vslide1down_vx_i16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m4_m(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { + return vslide1down_vx_i16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m8_m(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { + return vslide1down_vx_i16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32mf2_m(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1down_vx_i32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m1_m(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { + return vslide1down_vx_i32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m2_m(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { + return vslide1down_vx_i32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m4_m(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { + return vslide1down_vx_i32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m8_m(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { + return vslide1down_vx_i32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m1_m(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { + return vslide1down_vx_i64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m2_m(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { + return vslide1down_vx_i64m2_m(mask, src, 
value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m4_m(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { + return vslide1down_vx_i64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m8_m(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { + return vslide1down_vx_i64m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf8_m(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) { + return vslide1down_vx_u8mf8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf4_m(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) { + return vslide1down_vx_u8mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( poison, 
[[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf2_m(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) { + return vslide1down_vx_u8mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m1_m(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { + return vslide1down_vx_u8m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m2_m(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { + return vslide1down_vx_u8m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m4_m(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { + return vslide1down_vx_u8m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m8_m(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { + return 
vslide1down_vx_u8m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf4_m(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { + return vslide1down_vx_u16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf2_m(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) { + return vslide1down_vx_u16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m1_m(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { + return vslide1down_vx_u16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m2_m(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { + return vslide1down_vx_u16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m4_m(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { + return vslide1down_vx_u16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m8_m(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { + return vslide1down_vx_u16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32mf2_m(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_vx_u32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m1_m(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { + return vslide1down_vx_u32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - 
return vslide1down_vx_u32m2_m(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { + return vslide1down_vx_u32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m4_m(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { + return vslide1down_vx_u32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m8_m(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { + return vslide1down_vx_u32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m1_m(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { + return vslide1down_vx_u64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m2_m(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { + return vslide1down_vx_u64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m4_m(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { + return vslide1down_vx_u64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m8_m(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { + return vslide1down_vx_u64m8_m(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1up.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf8_m(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) { + return vslide1up_vx_i8mf8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf4_m(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) { + return vslide1up_vx_i8mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf2_m(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) { + return vslide1up_vx_i8mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m1_m(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { + return vslide1up_vx_i8m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m2_m(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { + return vslide1up_vx_i8m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m4_m(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { + return vslide1up_vx_i8m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - 
return vslide1up_vx_i8m8_m(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { + return vslide1up_vx_i8m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf4_m(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) { + return vslide1up_vx_i16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf2_m(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) { + return vslide1up_vx_i16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m1_m(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { + return vslide1up_vx_i16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m2_m(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { + return vslide1up_vx_i16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m4_m(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { + return vslide1up_vx_i16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m8_m(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { + return vslide1up_vx_i16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32mf2_m(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m1_m(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return 
vslide1up_vx_i32m2_m(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m4_m(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m8_m(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m1_m(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { + return vslide1up_vx_i64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m2_m(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { + return vslide1up_vx_i64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m4_m(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { + return vslide1up_vx_i64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m8_m(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { + return vslide1up_vx_i64m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf8_m(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) { + return vslide1up_vx_u8mf8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf4_m(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) { + return vslide1up_vx_u8mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf2_m(mask, maskedoff, src, value, vl); 
+vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) { + return vslide1up_vx_u8mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m1_m(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { + return vslide1up_vx_u8m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m2_m(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { + return vslide1up_vx_u8m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m4_m(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { + return vslide1up_vx_u8m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m8_m(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { + return vslide1up_vx_u8m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf4_m(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { + return vslide1up_vx_u16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf2_m(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) { + return vslide1up_vx_u16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m1_m(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { + return vslide1up_vx_u16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m2_m(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { + return vslide1up_vx_u16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m4_m(mask, maskedoff, src, value, vl); +vuint16m4_t 
test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { + return vslide1up_vx_u16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m8_m(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { + return vslide1up_vx_u16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32mf2_m(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m1_m(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m2_m(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m4_m(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m8_m(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m1_m(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { + return vslide1up_vx_u64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m2_m(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { + return vslide1up_vx_u64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m4_m(mask, maskedoff, src, 
value, vl); +vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { + return vslide1up_vx_u64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m8_m(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { + return vslide1up_vx_u64m8_m(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t offset, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, 
size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, size_t offset, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( 
poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, size_t offset, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 
[[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) { @@ -540,532 +540,532 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf4_m(mask, maskedoff, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_f16mf2_m(mask, maskedoff, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m1_m(mask, maskedoff, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m2_m(mask, maskedoff, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m4_m(mask, maskedoff, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m8_m(mask, maskedoff, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32mf2_m(mask, maskedoff, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m1_m(mask, maskedoff, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m2_m(mask, maskedoff, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m4_m(mask, maskedoff, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t 
test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m8_m(mask, maskedoff, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m1_m(mask, maskedoff, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m2_m(mask, maskedoff, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m4_m(mask, maskedoff, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m8_m(mask, maskedoff, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf8_m(mask, maskedoff, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf4_m(mask, maskedoff, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf2_m(mask, maskedoff, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m1_m(mask, maskedoff, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m2_m(mask, maskedoff, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m4_m(mask, maskedoff, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m8_m(mask, maskedoff, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf4_m(mask, maskedoff, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf2_m(mask, maskedoff, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m1_m(mask, maskedoff, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m2_m(mask, maskedoff, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m4_m(mask, maskedoff, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m8_m(mask, maskedoff, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t 
test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32mf2_m(mask, maskedoff, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m1_m(mask, maskedoff, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m2_m(mask, maskedoff, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m4_m(mask, maskedoff, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m8_m(mask, maskedoff, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m1_m(mask, maskedoff, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m2_m(mask, maskedoff, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m4_m(mask, maskedoff, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m8_m(mask, maskedoff, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t 
test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf8_m(mask, maskedoff, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf4_m(mask, maskedoff, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf2_m(mask, maskedoff, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m1_m(mask, maskedoff, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m2_m(mask, maskedoff, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m4_m(mask, maskedoff, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m8_m(mask, maskedoff, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf4_m(mask, maskedoff, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf2_m(mask, maskedoff, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t 
test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m1_m(mask, maskedoff, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m2_m(mask, maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m4_m(mask, maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m8_m(mask, maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32mf2_m(mask, maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m1_m(mask, maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m2_m(mask, maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m4_m(mask, maskedoff, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m8_m(mask, maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m1_m(mask, maskedoff, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m2_m(mask, maskedoff, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m4_m(mask, maskedoff, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m8_m(mask, maskedoff, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m8_m(mask, src, offset, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( 
[[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { @@ -387,7 +387,7 @@ // 
CHECK-RV64-LABEL: @test_vslideup_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, 
vuint16m4_t src, size_t offset, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vslideup.mask.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, 
size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( 
[[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, 
vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsll.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsll.c
@@ -800,793 +800,793 @@
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vsll_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll_vv_i8mf8_m(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vsll_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf8_m(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vsll_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
+vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll_vv_i8mf4_m(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return vsll_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
+vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf4_m(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll_vv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll_vv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t 
shift, size_t vl) { + return vsll_vv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll_vv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_i8m8_m(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll_vv_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, 
size_t vl) { - return vsll_vx_i8m8_m(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll_vv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll_vv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll_vv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll_vv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll_vv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_i16m8_m(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll_vv_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m8_m(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_i32mf2_m(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32mf2_m(mask, maskedoff, op1, 
shift, vl); +vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll_vv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_i32m2_m(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll_vv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m2_m(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll_vv_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_i32m8_m(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll_vv_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m8_m(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_i64m1_m(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll_vv_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m1_m(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_i64m2_m(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll_vv_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m2_m(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_i64m4_m(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll_vv_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m4_m(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { + return 
vsll_vx_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_i64m8_m(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll_vv_i64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m8_m(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll_vv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { + return vsll_vx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, 
vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll_vv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll_vv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll_vv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll_vv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll_vv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_u8m8_m(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll_vv_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m8_m(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll_vv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t 
test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll_vv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll_vv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll_vv_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll_vv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_u16m8_m(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll_vv_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m8_m(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll_vv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_u32m2_m(mask, maskedoff, op1, shift, vl); 
+vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll_vv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m2_m(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll_vv_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_u32m8_m(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll_vv_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m8_m(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_u64m1_m(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll_vv_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m1_m(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_u64m2_m(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll_vv_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m2_m(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_u64m4_m(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll_vv_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m4_m(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_u64m8_m(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll_vv_u64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m8_m(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsmul_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsmul_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsmul_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsmul_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsmul_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return 
vsmul_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsmul_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsmul_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsmul_vv_i16mf4_m(mask, 
maskedoff, op1, op2, vl); +vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsmul_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsmul_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsmul_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsmul_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsmul_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsmul_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return 
vsmul_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsmul_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsmul_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t 
op2, size_t vl) { - return vsmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsmul_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsmul_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsmul_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsmul_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsmul_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsra.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsra_vv_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { + return vsra_vx_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsra_vv_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { + return vsra_vx_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsra_vv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsra_vv_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsra_vv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsra_vv_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsra_vv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsra_vv_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsra_vv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsra_vv_i8m8_m(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return 
vsra_vv_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m8_m(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsra_vv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vsra_vx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsra_vv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, 
vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsra_vv_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsra_vv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsra_vv_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsra_vv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsra_vv_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsra_vv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsra_vv_i16m8_m(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsra_vv_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m8_m(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_vv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsra_vv_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsra_vv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsra_vv_i32m2_m(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsra_vv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m2_m(mask, 
maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsra_vv_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsra_vv_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsra_vv_i32m8_m(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsra_vv_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m8_m(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsra_vv_i64m1_m(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsra_vv_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m1_m(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsra_vv_i64m2_m(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsra_vv_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m2_m(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsra_vv_i64m4_m(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsra_vv_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m4_m(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsra_vv_i64m8_m(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsra_vv_i64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m8_m(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsrl.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsrl_vv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsrl_vv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsrl_vv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsrl_vv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsrl_vv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t 
mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsrl_vv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsrl_vv_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsrl_vv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t 
test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsrl_vv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsrl_vv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsrl_vv_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsrl_vv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + 
return vsrl_vv_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_vv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsrl_vv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsrl_vv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsrl_vv_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsrl_vv_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsrl_vv_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsrl_vv_u64m2_m(mask, 
op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsrl_vv_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsrl_vv_u64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, 
vuint64m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssra_vv_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { + return vssra_vx_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssra_vv_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl); +vint8mf4_t 
test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { + return vssra_vx_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssra_vv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssra_vv_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssra_vv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m1_m(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra_vv_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssra_vv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m2_m(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra_vv_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssra_vv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m4_m(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssra_vv_i8m8_m(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssra_vv_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m8_m(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vssra_vv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vssra_vx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vssra_vv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return 
vssra_vx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssra_vv_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssra_vv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m1_m(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra_vv_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssra_vv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m2_m(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t 
test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssra_vv_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssra_vv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m4_m(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssra_vv_i16m8_m(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssra_vv_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m8_m(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_vv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssra_vv_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssra_vv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m1_m(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssra_vv_i32m2_m(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssra_vv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m2_m(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t 
vl) { + return vssra_vx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra_vv_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssra_vv_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m4_m(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra_vv_i32m8_m(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssra_vv_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m8_m(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t 
test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra_vv_i64m1_m(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssra_vv_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m1_m(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra_vv_i64m2_m(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssra_vv_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m2_m(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssra_vv_i64m4_m(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssra_vv_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m4_m(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssra_vv_i64m8_m(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssra_vv_i64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m8_m(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssrl_vv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssrl_vv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssrl_vv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vssrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssrl_vv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssrl_vv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return 
vssrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssrl_vv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssrl_vv_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vssrl_vv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( 
poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vssrl_vv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssrl_vv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vssrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssrl_vv_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssrl_vv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, 
vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssrl_vv_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_vv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssrl_vv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssrl_vv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssrl_vv_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return 
vssrl_vx_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssrl_vv_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssrl_vv_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t 
test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssrl_vv_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssrl_vv_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssrl_vv_u64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vssub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vssub_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vssub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vssub_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vssub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vssub_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vssub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vssub_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vssub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vssub_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vssub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vssub_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vssub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vssub_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vssub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vssub_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vssub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vssub_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, 
vint16mf2_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vssub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vssub_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vssub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vssub_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssub_vv_i16m4_m(vbool4_t 
mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vssub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vssub_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vssub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vssub_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vssub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vssub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vssub_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vssub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vssub_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vssub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vssub_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vssub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vssub_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vssub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t 
test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vssub_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vssub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vssub_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vssub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vssub_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vssub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vssub_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vssubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vssubu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf8_m(mask, 
maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vssubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vssubu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vssubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vssubu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vssubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vssubu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vssubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vssubu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vssubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vssubu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vssubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vssubu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vssubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vssubu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return 
vssubu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vssubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vssubu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vssubu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vssubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vssubu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vssubu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t 
test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vssubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vssubu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vssubu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vssubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vssubu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vssubu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vssubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vssubu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vssubu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vssubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vssubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vssubu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, 
uint32_t op2, size_t vl) { + return vssubu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vssubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vssubu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vssubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vssubu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vssubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vssubu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vssubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vssubu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vssubu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vssubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vssubu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vssubu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vssubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vssubu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vssubu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vssubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vssubu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, 
uint64_t op2, size_t vl) { + return vssubu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsub.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsub_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsub_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsub_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsub_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsub_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsub_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsub_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + 
return vsub_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsub_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsub_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, 
vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsub_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsub_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsub_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsub_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsub_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsub_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsub_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsub_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsub_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t 
test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsub_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsub_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t 
test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsub_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsub_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsub_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsub_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsub_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsub_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsub_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsub_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsub_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsub_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsub_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsub_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsub_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsub_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsub_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vsub_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsub_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vsub_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf2_m(mask, maskedoff, 
op1, op2, vl); +vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsub_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vsub_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsub_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vsub_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsub_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vsub_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsub_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vsub_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsub_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsub_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsub_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsub_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsub_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m2_m(mask, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsub_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsub_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsub_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsub_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return 
vsub_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsub_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsub_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsub_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsub_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsub_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsub_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vsub_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vsub_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsub_vv_u64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwadd.c
@@ -548,541 +548,541 @@
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwadd_vv_i16mf4_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
vint8mf8_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_wv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwadd_wv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwadd_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_wv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwadd_wv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwadd_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_wv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwadd_wv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwadd_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwadd_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwadd_wv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwadd_wv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m2_m(mask, 
maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwadd_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwadd_wv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwadd_wv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwadd_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwadd_wv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwadd_wv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwadd_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_wv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwadd_wv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m1_m(mask, maskedoff, 
op1, op2, vl); +vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_wv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_wv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwadd_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwadd_wv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwadd_wv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwadd_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwadd_wv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwadd_wv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwadd_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwadd_wv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwadd_wv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t 
test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_wv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwadd_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwadd_wv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwadd_wv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwadd_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwadd_wv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwadd_wv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwadd_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t 
test_vwadd_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwadd_wv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwadd_wv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwaddu.c @@ -548,541 +548,541 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwaddu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return 
vwaddu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { + return vwaddu_wv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwaddu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { + return vwaddu_wv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwaddu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_wv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { + return vwaddu_wv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_wv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu_wv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m2_m(mask, maskedoff, op1, op2, vl); 
+vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_wv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu_wv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( 
poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_wv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu_wv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwaddu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vwaddu_wv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwaddu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_wv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vwaddu_wv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwaddu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_wv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vwaddu_wv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwaddu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return 
vwaddu_wv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vwaddu_wv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwaddu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_wv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vwaddu_wv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_wv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { + return 
vwaddu_wx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwaddu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_wv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { + return vwaddu_wv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwaddu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_wv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { + return vwaddu_wv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwaddu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_wv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { + return vwaddu_wv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvt.c @@ -143,136 +143,136 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vwcvt_x_x_v_i16mf4_m(mask, maskedoff, src, vl); +vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) { + return vwcvt_x_x_v_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vwcvt_x_x_v_i16mf2_m(mask, maskedoff, src, vl); +vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) { + return vwcvt_x_x_v_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vwcvt_x_x_v_i16m1_m(mask, maskedoff, src, vl); +vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) { + return vwcvt_x_x_v_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vwcvt_x_x_v_i16m2_m(mask, maskedoff, src, vl); +vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { + return vwcvt_x_x_v_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vwcvt_x_x_v_i16m4_m(mask, maskedoff, src, vl); +vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { + return vwcvt_x_x_v_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vwcvt_x_x_v_i16m8_m(mask, maskedoff, src, vl); +vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { + return vwcvt_x_x_v_i16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 
0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vwcvt_x_x_v_i32mf2_m(mask, maskedoff, src, vl); +vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) { + return vwcvt_x_x_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vwcvt_x_x_v_i32m1_m(mask, maskedoff, src, vl); +vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) { + return vwcvt_x_x_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vwcvt_x_x_v_i32m2_m(mask, maskedoff, src, vl); +vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { + return vwcvt_x_x_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vwcvt_x_x_v_i32m4_m(mask, maskedoff, src, vl); +vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { + return vwcvt_x_x_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vwcvt_x_x_v_i32m8_m(mask, maskedoff, src, vl); +vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { + return vwcvt_x_x_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vwcvt_x_x_v_i64m1_m(mask, maskedoff, src, vl); +vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vwcvt_x_x_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vwcvt_x_x_v_i64m2_m(mask, maskedoff, src, vl); +vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vwcvt_x_x_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vwcvt_x_x_v_i64m4_m(mask, maskedoff, src, vl); +vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vwcvt_x_x_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vwcvt_x_x_v_i64m8_m(mask, maskedoff, src, vl); +vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vwcvt_x_x_v_i64m8_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvtu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvtu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvtu.c @@ -143,136 +143,136 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf4_m(mask, maskedoff, src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) { + return vwcvtu_x_x_v_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf2_m(mask, maskedoff, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) { + return vwcvtu_x_x_v_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m1_m(mask, maskedoff, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) { + return vwcvtu_x_x_v_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vwcvtu_x_x_v_u16m2_m(mask, maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { + return vwcvtu_x_x_v_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m4_m(mask, maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { + return vwcvtu_x_x_v_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vwcvtu_x_x_v_u16m8_m(mask, maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { + return vwcvtu_x_x_v_u16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u32mf2_m(mask, maskedoff, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { + return vwcvtu_x_x_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m1_m(mask, maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { + return vwcvtu_x_x_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vwcvtu_x_x_v_u32m2_m(mask, maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { + return vwcvtu_x_x_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m4_m(mask, maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { + return vwcvtu_x_x_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vwcvtu_x_x_v_u32m8_m(mask, maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) { + return vwcvtu_x_x_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m1_m(mask, maskedoff, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_x_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vwcvtu_x_x_v_u64m2_m(mask, maskedoff, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vwcvtu_x_x_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m4_m(mask, maskedoff, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vwcvtu_x_x_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vwcvtu_x_x_v_u64m8_m(mask, maskedoff, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vwcvtu_x_x_v_u64m8_m(mask, src, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmacc.c
@@ -9,7 +9,7 @@
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
@@ -18,7 +18,7 @@
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
@@ -27,7 +27,7 @@
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
@@ -36,7 +36,7 @@
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
@@ -45,7 +45,7 @@
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
@@ -54,7 +54,7 @@
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
@@ -63,7 +63,7 @@
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
 // CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: 
@test_vwmacc_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t vd, int16_t 
rs1, vint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], 
i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, 
vint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccsu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccsu.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, 
size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t 
mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, 
vuint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccu.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ 
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t 
vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // 
CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccus.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccus.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccus.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccus.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccus_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vwmaccus_vx_i32m8(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmul.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwmul_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vwmul_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwmul_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return 
vwmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vwmul_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwmul_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vwmul_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwmul_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vwmul_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( 
poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwmul_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vwmul_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwmul_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vwmul_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwmul_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vwmul_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwmul_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vwmul_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwmul_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - 
return vwmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vwmul_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwmul_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vwmul_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwmul_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vwmul_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwmul_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwmul_vv_i64m4_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwmul_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulsu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulsu.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwmulsu_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { + return vwmulsu_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwmulsu_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { + return vwmulsu_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwmulsu_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t 
maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { + return vwmulsu_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwmulsu_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { + return vwmulsu_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwmulsu_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { + return vwmulsu_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwmulsu_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { + return vwmulsu_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwmulsu_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { + return vwmulsu_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl); 
+vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwmulsu_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { + return vwmulsu_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwmulsu_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { + return vwmulsu_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwmulsu_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { + return vwmulsu_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwmulsu_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { + return vwmulsu_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, 
size_t vl) { + return vwmulsu_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwmulsu_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { + return vwmulsu_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwmulsu_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwmulsu_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { + return vwmulsu_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulu.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwmulu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwmulu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwmulu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwmulu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwmulu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwmulu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwmulu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwmulu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwmulu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwmulu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwmulu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, 
size_t vl) { - return vwmulu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwmulu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwmulu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwmulu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwmulu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwmulu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwmulu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwmulu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwmulu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwmulu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t 
test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwmulu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwmulu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwmulu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwmulu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwmulu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwmulu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwmulu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return 
vwmulu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c @@ -170,163 +170,163 @@ // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf8_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m1_i16m1_m(mask, maskedoff, vector, 
scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m2_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m4_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m8_i16m1_m(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf4_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m1_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m2_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m4_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( 
poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m8_i32m1_m(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m1_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m2_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return 
vwredsum_vs_i32m4_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m8_i64m1_m(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c @@ -170,163 +170,163 @@ // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf8_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m1_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m2_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m4_u16m1_m(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m8_u16m1_m(mask, maskedoff, 
vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf4_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m1_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m2_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl); 
} // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m4_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m8_u32m1_m(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m1_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m2_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m4_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m8_u64m1_m(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsub.c @@ -548,541 +548,541 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_wv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_wv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - 
return vwsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_wv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_wv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( 
poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_wv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_wv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwsub_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwsub_wv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwsub_wv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwsub_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwsub_wv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwsub_wv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, 
vint16m4_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwsub_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwsub_wv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwsub_wv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwsub_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_wv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwsub_wv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_wv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_wv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwsub_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint16m1_t 
op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwsub_wv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwsub_wv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwsub_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t 
test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwsub_wv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwsub_wv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwsub_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwsub_wv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwsub_wv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_wv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { + return 
vwsub_wx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwsub_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwsub_wv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwsub_wv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t 
test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwsub_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwsub_wv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwsub_wv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwsub_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwsub_wv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwsub_wv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsubu.c @@ -548,541 +548,541 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwsubu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { + return vwsubu_wv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwsubu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return 
vwsubu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { + return vwsubu_wv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_wv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu_wv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_wv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_wv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_wv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_wv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t 
test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_wv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_wv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwsubu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vwsubu_wv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwsubu_vv_u32m1_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_wv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vwsubu_wv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwsubu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_wv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vwsubu_wv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_wv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu_wv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return 
vwsubu_wv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu_wv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_wv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwsubu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_wv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { + return vwsubu_wv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) { + return 
vwsubu_wx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwsubu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_wv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { + return vwsubu_wv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwsubu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_wv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { + return vwsubu_wv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vxor.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, 
vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vxor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vxor_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vxor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vxor_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vxor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vxor_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vxor_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vxor_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vxor_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vxor_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vxor_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vxor_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vxor_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vxor_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vxor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vxor_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vxor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vxor_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vxor_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m1_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vxor_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vxor_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vxor_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor_vv_i16m8_m(mask, maskedoff, 
op1, op2, vl); +vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vxor_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vxor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vxor_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vxor_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vxor_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vxor_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vxor_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vxor_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vxor_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vxor_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vxor_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vxor_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vxor_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vxor_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return 
vxor_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vxor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vxor_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vxor_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - 
return vxor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vxor_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vxor_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vxor_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vxor_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vxor_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vxor_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( 
poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vxor_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vxor_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vxor_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vxor_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vxor_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vxor_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, 
vuint16m4_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vxor_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vxor_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vxor_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vxor_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vxor_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vxor_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vxor_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vxor_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vxor_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vxor_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vxor_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vxor_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vxor_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t 
test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vxor_vv_u64m8_m(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vxor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vxor_vx_u64m8_m(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c
@@ -260,253 +260,253 @@
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) {
-  return vzext_vf2_u16mf4_m(mask, maskedoff, op1, vl);
+vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
+  return vzext_vf2_u16mf4_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) {
-  return vzext_vf2_u16mf2_m(mask, maskedoff, op1, vl);
+vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
+  return vzext_vf2_u16mf2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) {
-  return vzext_vf2_u16m1_m(mask, maskedoff, op1, vl);
+vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
+  return vzext_vf2_u16m1_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) {
-  return vzext_vf2_u16m2_m(mask, maskedoff, op1, vl);
+vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
+  return vzext_vf2_u16m2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) {
-  return vzext_vf2_u16m4_m(mask, maskedoff, op1, vl);
+vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
+  return vzext_vf2_u16m4_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) {
-  return vzext_vf2_u16m8_m(mask, maskedoff, op1, vl);
+vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint8m4_t op1, size_t vl) {
+  return vzext_vf2_u16m8_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) {
-  return vzext_vf4_u32mf2_m(mask, maskedoff, op1, vl);
+vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
+  return vzext_vf4_u32mf2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) {
-  return vzext_vf4_u32m1_m(mask, maskedoff, op1, vl);
+vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
+  return vzext_vf4_u32m1_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) {
-  return vzext_vf4_u32m2_m(mask, maskedoff, op1, vl);
+vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
+  return vzext_vf4_u32m2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) {
-  return vzext_vf4_u32m4_m(mask, maskedoff, op1, vl);
+vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
+  return vzext_vf4_u32m4_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) {
-  return vzext_vf4_u32m8_m(mask, maskedoff, op1, vl);
+vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
+  return vzext_vf4_u32m8_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
-  return vzext_vf8_u64m1_m(mask, maskedoff, op1, vl);
+vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
+  return vzext_vf8_u64m1_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
-  return vzext_vf8_u64m2_m(mask, maskedoff, op1, vl);
+vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
+  return vzext_vf8_u64m2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
-  return vzext_vf8_u64m4_m(mask, maskedoff, op1, vl);
+vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
+  return vzext_vf8_u64m4_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
-  return vzext_vf8_u64m8_m(mask, maskedoff, op1, vl);
+vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
+  return vzext_vf8_u64m8_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) {
-  return vzext_vf2_u32mf2_m(mask, maskedoff, op1, vl);
+vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
+  return vzext_vf2_u32mf2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) {
-  return vzext_vf2_u32m1_m(mask, maskedoff, op1, vl);
+vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
+  return vzext_vf2_u32m1_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) {
-  return vzext_vf2_u32m2_m(mask, maskedoff, op1, vl);
+vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
+  return vzext_vf2_u32m2_m(mask, op1, vl);
}
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask,
vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf2_u32m4_m(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { + return vzext_vf2_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return vzext_vf2_u32m8_m(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint16m4_t op1, size_t vl) { + return vzext_vf2_u32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf4_u64m1_m(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { + return vzext_vf4_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf4_u64m2_m(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { + return vzext_vf4_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf4_u64m4_m(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { + return vzext_vf4_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf4_u64m8_m(mask, 
maskedoff, op1, vl); +vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { + return vzext_vf4_u64m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vzext_vf2_u64m1_m(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) { + return vzext_vf2_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return vzext_vf2_u64m2_m(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { + return vzext_vf2_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return vzext_vf2_u64m4_m(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { + return vzext_vf2_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return vzext_vf2_u64m8_m(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint32m4_t op1, size_t vl) { + return vzext_vf2_u64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaadd.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return 
vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32m1_t 
test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, 
size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t 
test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vaadd(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaaddu.c @@ -404,397 
+404,397 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); 
+vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t 
op1, uint8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t 
op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, 
size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vaaddu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vadd.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - 
return vadd(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd(mask, 
maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint8m8_t 
test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, 
size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t 
maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( 
poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, 
size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32m4_t 
test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, 
size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t 
test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vadd(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vand.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return 
vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_m(vbool32_t 
mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); 
+vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return 
vand(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, 
int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, 
vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t 
test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( 
poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return 
vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); +vuint64m8_t 
test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vand(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasub.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, 
int16_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub(mask, 
maskedoff, op1, op2, vl); +vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t 
maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vasub(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vasubu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, 
op2, vl); +vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t 
maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, 
uint32_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, 
vuint32m4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdiv.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, 
vint8mf4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t 
op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return 
vdiv(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, 
vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t 
test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdiv.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vdiv(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdivu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdivu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdivu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vdivu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, 
vuint8mf4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t 
test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdivu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32m2_t 
test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, 
vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vdivu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfabs.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfabs.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return 
vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfabs(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfabs(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t 
op1, _Float16 op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, 
vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, 
vfloat64m2_t op1, double op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfadd(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass.c @@ 
-144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t 
mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfclass(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfclass(mask, maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfclass(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c @@ -819,811 +819,811 @@ // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vint16mf4_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vint16mf2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vint16m2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint16m2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( poison, 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t 
test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vint64m8_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint64m8_t src, size_t vl) { + return vfcvt_f(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfdiv.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, 
vfloat16m1_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, 
_Float16 op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + 
return vfdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfdiv(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, 
vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // 
CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 
+99,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, 
vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, 
vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( 
[[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, 
vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t 
mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( poison, 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t 
mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t 
maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfmax(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, 
_Float16 op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, 
vfloat16m4_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, 
double op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfmin(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: 
@test_vfmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, 
vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float 
[[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( 
[[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( 
[[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], 
half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t 
test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // 
CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t 
test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return 
vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmul(mask, 
maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfmul(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vfmul(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfmul(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmul(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c
@@ -873,865 +873,865 @@
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_x(mask, maskedoff, src, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
+ return vfncvt_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfncvt_rtz_x(mask, maskedoff, src, vl);
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, src, vl);
}
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfncvt_x(mask, maskedoff, src, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
+ return vfncvt_x(mask, src, vl);
}
//
CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // 
CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_rtz_xu_f_w_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + 
return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + 
return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t 
mask, vfloat32m1_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, 
vfloat32m8_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_xu(mask, 
maskedoff, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t 
src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, 
size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return 
vfncvt_f(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, 
vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfncvt_rod_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfncvt_rod_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfncvt_rod_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, 
vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return vfncvt_rod_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t 
test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t 
test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return vfncvt_rod_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return vfncvt_rod_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return vfncvt_rod_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return vfncvt_rod_f(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfneg.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return 
vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfneg(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: 
@test_vfnmacc_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, 
vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vfnmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // 
CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[VD:%.*]], double 
[[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, 
size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfnmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64( 
[[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, 
vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16mf2_t test_vfnmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( 
[[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( 
[[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ 
-513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ 
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrdiv.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrdiv.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t 
vl) { + return vfrdiv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfrdiv(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfrec7(mask, 
op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfrec7.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfrec7(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, 
scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, 
vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t 
test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return 
vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t 
test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t 
scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfredusum(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t 
test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, 
vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfrsqrt7(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, 
op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfrsub(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnj_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return 
vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnj_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, 
op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfsgnj(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: 
@test_vfsgnjn_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t 
op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, 
op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfsgnjn(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx(mask, 
maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t 
vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfsgnjx(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float value, size_t vl) { + return vfslide1down(mask, src, value, vl); } 
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( poison, [[SRC:%.*]], double 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, double value, size_t vl) { + return vfslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, double value, size_t vl) { + return vfslide1down(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64( poison, 
[[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64( poison, [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up(mask, 
src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( poison, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, double value, size_t vl) { + return vfslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( poison, [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, double value, size_t vl) { + return vfslide1up(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c @@ -144,136 +144,136 @@ // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, 
vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: 
@test_vfsqrt_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsqrt.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { + return vfsqrt(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16m1_t 
test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t 
mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); 
+vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vfsub(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd.c @@ -333,325 +333,325 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float16 op2, 
size_t vl) { + return vfwadd_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); +vfloat32m8_t 
test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { + return vfwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return vfwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); +vfloat64m4_t 
test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return vfwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c @@ -684,676 +684,676 @@ // 
CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfwcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfwcvt_rtz_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfwcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + 
return vfwcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { + 
return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } 
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfwcvt_f_f_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfwcvt_rtz_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfwcvt_rtz_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_rtz_x(mask, src, vl); } // 
CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + 
return vfwcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vint32m1_t src, 
size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vfwcvt_f(mask, src, 
vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return vfwcvt_f(mask, src, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 
@@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul.c @@ -171,163 +171,163 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( 
poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { 
- return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfwmul(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8(vfloat32m8_t 
vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: 
@test_vfwnmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfwnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c @@ -108,100 +108,100 @@ // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, 
vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c @@ -108,100 +108,100 @@ // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t 
test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return vfwredusum(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub.c @@ -333,325 +333,325 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return 
vfwsub_vf(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwsub_wf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, 
_Float16 op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfwsub_vv(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { + return vfwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, 
vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return vfwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return vfwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vid.c deleted file mode 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vid.c +++ /dev/null @@ -1,206 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ -// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ -// RUN: FileCheck --check-prefix=CHECK-RV64 %s - -#include - -// CHECK-RV64-LABEL: @test_vid_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret 
[[TMP0]] -// -vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u8m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u16m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vid.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - -// CHECK-RV64-LABEL: @test_vid_v_u64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return vid(mask, maskedoff, vl); -} - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/viota.c deleted file mode 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/viota.c +++ /dev/null @@ -1,206 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ -// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ -// RUN: FileCheck --check-prefix=CHECK-RV64 %s - -#include - -// CHECK-RV64-LABEL: @test_viota_m_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.viota.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u8m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: 
@test_viota_m_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u16m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t 
op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_viota_m_u64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return viota(mask, maskedoff, op1, vl); -} - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16.c @@ -9,163 +9,163 @@ // CHECK-RV64-LABEL: @test_vle16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vle.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vuint16mf4_t 
test_vle16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t vl) { + return vle16(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); +vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t vl) { + return vle16(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle16ff.c @@ -9,217 +9,217 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32f16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); 
+vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff(mask, maskedoff, base, new_vl, vl); +vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vle16ff(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32.c @@ -9,136 +9,136 @@ // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, const float *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, const float *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, const float *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, const float *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, const float *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( poison, 
ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t vl) { + return vle32(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); +vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t vl) { + return vle32(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle32ff.c @@ -9,181 +9,181 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } 
@llvm.riscv.vleff.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vfloat32m4_t 
test_vle32ff_v_f32m4_m(vbool8_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, const float *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr 
[[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } 
@llvm.riscv.vleff.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff(mask, maskedoff, base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff(mask, base, new_vl, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64.c @@ -9,109 +9,109 @@ // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, const double *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, const double *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, const double *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, const double *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, 
base, vl); +vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t vl) { + return vle64(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); +vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t vl) { + return vle64(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle64ff.c @@ -9,145 +9,145 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vle64ff_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t mask, const double *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, const double *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, 
size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff(mask, maskedoff, base, new_vl, vl); +vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vle64ff(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8.c @@ -9,127 +9,127 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vle8_v_i8m8_m(vbool1_t 
mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t vl) { + return vle8(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); +vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t vl) { + return vle8(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vle8ff.c @@ -9,169 +9,169 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); 
+vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff(mask, maskedoff, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vle8ff(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei16.c @@ -522,514 +522,514 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) 
{ - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, 
maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint16m4_t 
test_vloxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint64m4_t 
test_vloxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint16mf2_t 
test_vloxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); 
+vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei32.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei32.c @@ -477,469 +477,469 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( 
[[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, 
vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( 
[[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return 
vloxei32(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, 
bindex, vl); +vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint8mf2_t 
test_vloxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); 
+vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei64.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, 
maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint8mf2_t 
test_vloxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, 
const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, const uint16_t 
*base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, 
const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxei8.c @@ -540,532 +540,532 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, 
const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t bindex, 
size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const 
int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint64m1_t 
test_vloxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return 
vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei16.c @@ -633,625 +633,625 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const 
_Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); +void test_vloxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, 
size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m1_m(vint8m1_t *v0, 
vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store 
[[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, base, bindex, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei32.c
@@ -607,599 +607,599 @@
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+ return vloxseg2ei32(v0, v1, mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], ptr
[[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t 
vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, 
base, bindex, vl); +void test_vloxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, 
vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t 
mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei64.c @@ -542,534 +542,534 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return 
vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint64m8_t 
bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16m1_m(vint16m1_t 
*v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); +void test_vloxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return 
vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const 
uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t 
mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret 
void // -void test_vloxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 
// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg2ei8.c @@ -633,625 +633,625 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, 
vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, 
vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); +void test_vloxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, 
size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const 
int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t 
maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, 
vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei16.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, 
size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint16mf4_t 
bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, 
vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei32.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, 
mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t 
*v2, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i32m1_m(vint32m1_t *v0, 
vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void 
test_vloxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, 
const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei64.c @@ -534,7 +534,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -543,13 +543,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -558,13 +558,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, 
vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, 
vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store 
[[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void 
test_vloxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, 
v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t 
maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg3ei8.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg3ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, 
vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, 
vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, 
vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret 
void // -void test_vloxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, 
vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei16.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, 
vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + 
return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i16m1_m(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, 
vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 
+1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const 
uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei32.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, 
vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, 
vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
} [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint32m2_t 
bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t 
maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei64.c @@ -604,7 +604,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -615,13 +615,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, 
vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -632,13 +632,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, 
vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 
2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u16m2_m(vuint16m2_t 
*v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( 
poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,7 +1193,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, 
const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg4ei8.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void 
test_vloxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t 
maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ 
-921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return 
vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t 
mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei16.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, 
vl); +void test_vloxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, 
vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t 
mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 
2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, 
vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const 
uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei32.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, 
vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + 
return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei64.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t 
*v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void 
test_vloxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, 
vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, 
const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return 
vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg5ei8.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, 
vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, 
size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], 
ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, 
vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei16.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 
*base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t 
maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, 
v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei32.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return 
vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: 
store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t 
*v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, 
vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void 
test_vloxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t 
mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei64.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t 
*v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t 
vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t 
maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return 
vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, 
size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg6ei8.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, 
size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( poison, 
poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, 
vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const 
uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg6ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei16.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, 
vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { 
- return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, 
size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - 
return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, 
vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t 
maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t 
maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t 
maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, 
vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, 
vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei32.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg7ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return 
vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t 
*base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void 
test_vloxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei64.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr 
[[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg7ei8.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t 
vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, 
vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, 
vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t 
maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei16.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t 
*v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, 
vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t 
mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, 
vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ 
// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei32.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t 
*v7, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t 
vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, 
vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( poison, 
poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, 
vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, 
vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 
+1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, base, bindex, vl);
}
// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -1303,7 +1303,7 @@
// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei64.c
@@ -659,7 +659,7 @@
// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -678,13 +678,13 @@
// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask,
vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , 
} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) 
{ + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void 
test_vloxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, 
vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t 
*v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void 
test_vloxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, 
vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vloxseg8ei8.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, 
vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, 
vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, 
vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, 
vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t 
maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 
+1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse16.c @@ -9,163 +9,163 @@ // CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, const 
_Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse32.c @@ -9,136 +9,136 @@ // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], 
i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, 
const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse64.c @@ -9,109 +9,109 @@ // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, const int64_t *base, 
ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlse8.c @@ -8,127 +8,127 @@ // CHECK-RV64-LABEL: 
@test_vlse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlse8(mask, maskedoff, base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( poison, ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8(mask, maskedoff, base, bstride, vl); +vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16.c @@ -9,196 +9,196 @@ // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, size_t vl) { + 
return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, 
const uint16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e16ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -18,13 +18,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], 
align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } 
[[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -93,13 +93,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -183,13 +183,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -198,13 +198,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -213,13 +213,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -228,7 +228,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32.c @@ -9,157 +9,157 
@@ // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return 
vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e32ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -18,13 +18,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg2e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr 
[[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -93,13 +93,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -108,13 +108,13 @@ // 
CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, i64 } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64.c @@ -9,118 +9,118 @@ // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, size_t vl) { + return vlseg2e64(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, size_t vl) { + return vlseg2e64(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, size_t vl) { + return vlseg2e64(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, 
vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e64ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -18,13 +18,13 
@@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , i64 } [[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -93,13 +93,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -138,7 +138,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8.c @@ -8,157 +8,157 @@ // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t 
*base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg2e8ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -18,13 +18,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t 
*base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, size_t 
*new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -93,13 +93,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, size_t *new_vl, 
size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, 
size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -18,13 +18,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -93,13 +93,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e16ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } 
@llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -105,13 +105,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -156,13 +156,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, i64 } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -18,13 +18,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, 
size_t vl) { + return vlseg3e32(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); 
+void test_vlseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -93,13 +93,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, 
size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -138,7 +138,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e32ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -105,13 +105,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, 
vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -156,7 +156,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -18,13 +18,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const 
double *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void 
test_vlseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e64ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -105,7 +105,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17,13 +17,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -32,13 +32,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -47,13 +47,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -62,13 +62,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -77,13 +77,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -92,13 +92,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -107,13 +107,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -137,13 +137,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -152,7 +152,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg3e8ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
new_vl, vl); +void test_vlseg3e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -105,13 +105,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( poison, poison, poison, ptr 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -156,13 +156,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -173,7 +173,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP4]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, base, new_vl, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, size_t vl) { + return 
vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -105,13 +105,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t 
*v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -156,13 +156,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e16ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg4e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -60,13 +60,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg4e16ff_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ 
-117,13 +117,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -136,13 +136,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); 
} // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -174,13 +174,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -193,13 +193,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , , i64 } [[TMP0]], 1 @@ -212,13 +212,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -231,7 +231,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -105,13 +105,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -156,7 +156,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t 
maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e32ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, 
vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -60,13 +60,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -117,13 +117,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -136,13 +136,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void 
test_vlseg4e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -174,7 +174,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t 
maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -105,7 +105,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e64ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t 
*v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -60,13 +60,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -19,13 +19,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -36,13 +36,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -70,13 +70,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -104,13 +104,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { - return 
vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -121,13 +121,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -172,7 +172,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg4e8ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -60,13 +60,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -117,13 +117,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf8_m(vuint8mf8_t 
*v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -136,13 +136,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -174,13 +174,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -193,7 +193,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP5]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t 
*v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -60,13 +60,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, base, 
vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -117,13 +117,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -136,13 +136,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_u16mf2_m(vuint16mf2_t *v0, 
vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -174,7 +174,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e16ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return 
vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -66,13 +66,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -129,13 +129,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr 
[[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -150,13 +150,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -171,13 +171,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -192,7 +192,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -60,13 +60,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32(v0, v1, 
v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e32ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -66,13 +66,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { - return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, size_t vl) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { - return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -60,7 +60,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, 
vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e64ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t 
maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -66,7 +66,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21,13 +21,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -40,13 +40,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -59,13 +59,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( poison, poison, 
poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -97,13 +97,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -116,13 +116,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8(v0, v1, 
v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -135,13 +135,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -154,7 +154,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg5e8ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } 
@llvm.riscv.vlseg5ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -66,13 +66,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -129,13 +129,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -150,13 +150,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, 
vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -171,7 +171,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP6]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +void test_vlseg5e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const 
_Float16 *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -66,13 +66,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -129,13 +129,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -150,13 +150,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -171,13 +171,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t 
maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -192,7 +192,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e16ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t 
maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -72,13 +72,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return 
vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t 
*v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -141,13 +141,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -164,13 +164,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -187,13 +187,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -66,13 +66,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e32ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , 
i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -72,13 +72,13 @@ // 
CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -141,7 +141,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { - return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, size_t vl) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { - return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -66,7 +66,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, vl); +void test_vlseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e64ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t 
*new_vl, size_t vl) { - return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -72,7 +72,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -23,13 +23,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t 
maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -44,13 +44,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -65,13 +65,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg6e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -86,13 +86,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -107,13 +107,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -149,13 +149,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, 
vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg6e8ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg6e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -72,13 +72,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, 
size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -141,13 +141,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t 
*v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -164,13 +164,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -187,7 +187,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP7]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +void test_vlseg6e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, 
vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -72,13 +72,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, 
mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -141,13 +141,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -164,13 +164,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -187,13 +187,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e16ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -28,13 +28,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, 
new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + 
return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return 
vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -178,13 +178,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return 
vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -203,13 +203,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -228,7 +228,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t 
vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -72,13 +72,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, 
vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -141,7 +141,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e32ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -28,13 +28,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return 
vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, 
v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return 
vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { - return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, size_t vl) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, 
vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { - return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -72,7 +72,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e64ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -28,13 +28,13 @@ // 
CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -78,7 +78,7 @@ // CHECK-RV64-NEXT: 
store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -25,13 +25,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -94,13 +94,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,13 +117,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -140,13 +140,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -163,13 +163,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -186,7 +186,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg7e8ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -28,13 +28,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t 
maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, 
vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, 
vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -178,13 +178,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -203,7 +203,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP8]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, 
size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +void test_vlseg7e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28,13 +28,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -53,13 +53,13 @@ // 
CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 
2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -178,13 +178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -203,13 +203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -228,7 +228,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e16ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -30,13 +30,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -57,13 +57,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 
*base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -84,13 +84,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -111,13 +111,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -165,13 +165,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, 
vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -192,13 +192,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -219,13 +219,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, 
vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -246,7 +246,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28,13 +28,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], 
ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e32ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -30,13 +30,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -57,13 +57,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return 
vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -84,13 +84,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -111,13 +111,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t 
maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -165,7 +165,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret 
void // -void test_vlseg8e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28,13 +28,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { - return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, size_t vl) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { - return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, size_t vl) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -78,7 +78,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, size_t vl) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e64ff.c @@ -9,7 +9,7 @@ // 
CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -30,13 +30,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -57,13 +57,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, 
vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -84,7 +84,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27,13 +27,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, 
vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -52,13 +52,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -77,13 +77,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t 
maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -102,13 +102,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -127,13 +127,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t 
maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -152,13 +152,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -177,13 +177,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t 
maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -202,7 +202,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlseg8e8ff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -30,13 +30,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -57,13 +57,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -84,13 +84,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -111,13 +111,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( 
poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -165,13 +165,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -192,13 +192,13 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -219,7 +219,7 @@ // CHECK-RV64-NEXT: store i64 [[TMP9]], ptr [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c @@ -9,196 +9,196 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t 
maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, 
vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret 
void // -void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], 
align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c @@ -9,157 +9,157 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e64.c @@ -9,118 +9,118 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, 
v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) 
{ - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c @@ -8,157 +8,157 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -18,13 +18,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t 
maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -93,13 +93,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -168,13 +168,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t 
maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -18,13 +18,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) 
{ - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -93,13 +93,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -123,13 +123,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -138,7 +138,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -18,13 +18,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -33,13 +33,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , } [[TMP0]], 1 @@ -63,13 +63,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17,13 +17,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -32,13 +32,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -47,13 +47,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -62,13 +62,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -77,13 +77,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( poison, poison, 
poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -92,13 +92,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -107,13 +107,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); 
} // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -137,13 +137,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -152,7 +152,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -105,13 +105,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t 
*base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -156,13 +156,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -173,13 +173,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], 
i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -190,13 +190,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( poison, 
poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -105,13 +105,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -122,13 +122,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -139,13 +139,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, 
size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -156,7 +156,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -20,13 +20,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, 
ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -37,13 +37,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,13 +54,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -88,13 +88,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -105,7 +105,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, 
vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -19,13 +19,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -36,13 +36,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -70,13 +70,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -104,13 +104,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -121,13 +121,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -138,13 +138,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -172,7 +172,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void 
// -void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, 
vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -60,13 +60,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, 
vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -117,13 +117,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -136,13 +136,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -155,13 +155,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -174,7 +174,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -60,13 +60,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -79,13 +79,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const 
int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -98,13 +98,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff 
--git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -22,13 +22,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -41,13 +41,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -60,7 +60,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21,13 +21,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -40,13 +40,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -59,13 +59,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -97,13 +97,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -116,13 +116,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const 
uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -135,13 +135,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -154,7 +154,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void 
test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -66,13 +66,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, 
size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -129,13 +129,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -150,13 +150,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -171,13 +171,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -192,7 +192,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -66,13 +66,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -87,13 +87,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -108,13 +108,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, 
poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -24,13 +24,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -45,13 +45,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -66,7 +66,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -23,13 +23,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -44,13 +44,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -65,13 +65,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -86,13 +86,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -107,13 +107,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -149,13 +149,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -72,13 +72,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -141,13 +141,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -164,13 +164,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -187,13 +187,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -72,13 +72,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -95,13 +95,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -118,13 +118,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -141,7 +141,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26,13 +26,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -49,13 +49,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -72,7 +72,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -25,13 +25,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -48,13 +48,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, 
vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -71,13 +71,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -94,13 +94,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, 
vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,13 +117,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -140,13 +140,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t 
maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -163,13 +163,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -186,7 +186,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, 
vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28,13 +28,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg8e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -153,13 +153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -178,13 +178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, 
vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -203,13 +203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -228,7 +228,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28,13 +28,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -78,13 +78,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -103,13 +103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -128,13 +128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28,13 +28,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -53,13 +53,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -78,7 +78,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, 
vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27,13 +27,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -52,13 +52,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -77,13 +77,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -102,13 +102,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -127,13 +127,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -152,13 +152,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -177,13 +177,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -202,7 +202,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei16.c @@ -522,514 +522,514 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const 
_Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, 
vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t 
bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return 
vluxei16(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, 
vl); +vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint16m4_t 
test_vluxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); 
+vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei32.c @@ -477,469 +477,469 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, 
maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return 
vluxei32(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); 
+vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint64m1_t 
test_vluxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, 
const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t 
mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei64.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei64.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat64m1_t 
test_vluxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, const 
int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, 
vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, 
size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxei8.c @@ -540,532 +540,532 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( poison, 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + 
return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t 
test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( 
[[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, 
maskedoff, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, 
vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei16.c @@ -633,625 +633,625 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return 
vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32mf2_m(vfloat32mf2_t 
*v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double 
*base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t 
maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, 
vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr 
[[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei32.c @@ -607,599 +607,599 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) 
{ + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const 
uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, 
vl); +void test_vluxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, 
size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t 
maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei64.c @@ -542,534 +542,534 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, 
v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, 
size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, 
vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vluxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t 
bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t 
maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg2ei8.c @@ -633,625 +633,625 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint8m2_t 
bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, 
vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, 
vl); +void test_vluxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - 
return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, 
vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, 
vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t 
mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store 
[[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei16.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t 
*v2, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 
@@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, 
size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const 
int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t 
maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + 
return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret 
void // -void test_vluxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei32.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, 
vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, 
vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, 
vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( poison, 
poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return 
vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void 
test_vluxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei64.c @@ -534,7 +534,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -543,13 +543,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -558,13 +558,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return 
vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg3ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, 
vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m1_m(vint32m1_t *v0, 
vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( 
poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) 
{ + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void 
test_vluxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg3ei8.c @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -588,13 +588,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -603,13 +603,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -618,13 +618,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -648,13 +648,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -663,13 +663,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -708,13 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t 
maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -723,13 +723,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -783,13 +783,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -798,13 +798,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -813,13 +813,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, 
size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -873,13 +873,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -888,13 +888,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -918,13 +918,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -933,13 +933,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t 
bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -963,13 +963,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -993,13 +993,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1008,13 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1068,13 +1068,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t 
maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1083,13 +1083,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1098,13 +1098,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei16.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store 
[[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 
*base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, 
size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, 
vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr 
[[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf2_m(vuint8mf2_t 
*v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t 
*base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei32.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t 
maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ 
-734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, 
const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); +void test_vluxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( 
poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, 
vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, 
vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ 
// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, 
vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, 
v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei64.c @@ -604,7 +604,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -615,13 +615,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -632,13 +632,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, 
const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret 
void // -void test_vluxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint64m8_t 
bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void 
test_vluxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t 
maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 
+1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, 
vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,7 +1193,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg4ei8.c @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -666,13 +666,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -683,13 +683,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, } [[TMP0]], 1 @@ -700,13 +700,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -734,13 +734,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, 
vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -751,13 +751,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -768,13 +768,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -802,13 +802,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -819,13 +819,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -836,13 +836,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -870,13 +870,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -887,13 +887,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +904,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, 
const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -921,13 +921,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,13 +938,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -955,13 +955,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -989,13 +989,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t 
*v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1006,13 +1006,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1023,13 +1023,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1040,13 +1040,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1057,13 +1057,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store 
[[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1091,13 +1091,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1108,13 +1108,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1125,13 +1125,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1142,13 +1142,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1159,13 +1159,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1193,13 +1193,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void 
test_vluxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1210,13 +1210,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1227,13 +1227,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1244,13 +1244,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei16.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, 
vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
bindex, vl); +void test_vluxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t 
*v4, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, 
vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint16m1_t 
bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei32.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t 
*v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], 
align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, 
vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, 
v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 
+915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t 
*base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, 
mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei64.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, 
vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, 
vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, 
v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( 
poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], 
align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg5ei8.c @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -516,13 +516,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], 
align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -535,13 +535,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -554,13 +554,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t 
vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -573,13 +573,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -592,13 +592,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -611,13 +611,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -630,13 +630,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -649,13 +649,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -668,13 +668,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -687,13 +687,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: 
ret void // -void test_vluxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -706,13 +706,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -725,13 +725,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void 
test_vluxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -744,13 +744,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -763,13 +763,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -782,13 +782,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -820,13 +820,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -839,13 +839,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -858,13 +858,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, 
vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -896,13 +896,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, 
size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -915,13 +915,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -934,13 +934,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -972,13 +972,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei16.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 
@@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, 
vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void 
test_vluxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t 
vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, 
poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei32.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, 
vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, 
vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 
// CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei64.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // 
CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, 
vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - 
return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, 
v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store 
[[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg6ei8.c @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -570,13 +570,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -591,13 +591,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -612,13 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -633,13 +633,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,13 +654,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr 
[[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -675,13 +675,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -696,13 +696,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, 
vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -717,13 +717,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -738,13 +738,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return 
vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,13 +759,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -780,13 +780,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t 
*v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -801,13 +801,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -822,13 +822,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -843,13 +843,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -864,13 +864,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -885,13 +885,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -906,13 +906,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -927,13 +927,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -948,13 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -990,13 +990,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1011,13 +1011,13 @@ 
// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1032,13 +1032,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, 
vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1074,13 +1074,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1095,7 +1095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t 
maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei16.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 
2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, 
poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , 
} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei32.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return 
vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t 
maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, 
vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t 
*v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t 
*v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, 
vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr 
[[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // 
CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei64.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return 
vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t 
*v6, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, 
vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t 
*base, vuint64m8_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t 
*v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, 
vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, 
vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8mf4_m(vuint8mf4_t 
*v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void 
test_vluxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - 
return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t 
maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg7ei8.c @@ -607,7 +607,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -624,13 +624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -647,13 +647,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -670,13 +670,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -693,13 +693,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -716,13 +716,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -739,13 +739,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -762,13 +762,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -785,13 +785,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -808,13 +808,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -831,13 +831,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -854,13 +854,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -877,13 +877,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -900,13 +900,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -923,13 +923,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -946,13 +946,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -969,13 +969,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -992,13 +992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, 
poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1015,13 +1015,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1038,13 +1038,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( poison, 
poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1061,13 +1061,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1084,13 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1107,13 +1107,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1130,13 +1130,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1176,13 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei16.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void 
test_vluxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t 
maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ 
// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, 
poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, 
vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t 
maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, 
vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei32.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { + 
return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void 
test_vluxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t 
bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t 
*v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ 
-1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, 
vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t 
bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, 
vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei64.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bindex, vl); +void test_vluxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t 
maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, 
poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, 
size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, 
vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c 
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vluxseg8ei8.c @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -678,13 +678,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -703,13 +703,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t 
maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -728,13 +728,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -753,13 +753,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -778,13 +778,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -803,13 +803,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -828,13 +828,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -853,13 +853,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -878,13 +878,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -903,13 +903,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -928,13 +928,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -953,13 +953,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -978,13 +978,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t 
*v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1003,13 +1003,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1028,13 +1028,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1053,13 +1053,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1078,13 +1078,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, 
vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1103,13 +1103,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1128,13 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr 
[[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1153,13 +1153,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1178,13 +1178,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1203,13 +1203,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1228,13 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1253,13 +1253,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1278,13 +1278,13 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, 
vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint64m4_t test_vmacc_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t 
vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( 
[[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // 
CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t 
mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t 
vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 
@@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t vd, 
vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: 
@test_vmadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, 
vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( 
[[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t 
vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: 
@test_vmadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { 
@@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmax.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + 
return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmax(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmaxu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmaxu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmaxu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16m2_t 
test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t 
maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmaxu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmfeq.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return 
vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfeq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfeq(mask, maskedoff, op1, op2, vl); +vbool8_t 
test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfeq(mask, op1, op2, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge.c
@@ -279,271 +279,271 @@
 // CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfge(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+ return vmfge(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfge(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+ return vmfge(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vmfge(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+ return vmfge(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vmfge(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+ return vmfge(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]]
= call @llvm.riscv.vmfge.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmfge_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float 
op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); 
+vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, 
size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmfge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfge(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmfge(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfge(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfge(mask, op1, op2, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt.c
@@ -279,271 +279,271 @@
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfgt(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+ return vmfgt(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfgt(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+ return vmfgt(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2,
size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool8_t 
test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, 
size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t 
test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vmfgt(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfgt(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vmfgt(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmfgt(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vmfgt(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfgt(mask, op1, op2, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle.c
@@ -279,271 +279,271 @@
 // CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vmfle(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+ return vmfle(mask, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vmfle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vmfle(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmflt.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return 
vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmflt(mask, maskedoff, op1, op2, vl); +vbool8_t 
test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vmflt(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne.c @@ -279,271 +279,271 @@ // CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmfne.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmfne_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.f16.i64( poison, [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float 
op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); 
+vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( poison, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, 
size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( poison, [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return vmfne(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmin.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmin_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + 
return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint32mf2_t 
test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmin(mask, 
maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t 
op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t 
maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmin(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vminu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vminu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vminu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return 
vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t 
op1, vuint8m8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, 
size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vminu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t 
op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vminu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbf.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbf.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsbf.c @@ -71,64 +71,64 @@ // CHECK-RV64-LABEL: @test_vmsbf_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return vmsbf(mask, maskedoff, op1, vl); +vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { + return vmsbf(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return vmsbf(mask, maskedoff, op1, vl); +vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { + return vmsbf(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t 
op1, size_t vl) { - return vmsbf(mask, maskedoff, op1, vl); +vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return vmsbf(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return vmsbf(mask, maskedoff, op1, vl); +vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return vmsbf(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return vmsbf(mask, maskedoff, op1, vl); +vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return vmsbf(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return vmsbf(mask, maskedoff, op1, vl); +vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return vmsbf(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return vmsbf(mask, maskedoff, op1, vl); +vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return vmsbf(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmseq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmseq.c @@ -801,793 +801,793 @@ // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmseq(mask, 
maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - 
return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, 
op2, vl); +vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, 
op2, vl); +vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t 
op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, 
vint16m8_t op1, int16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t 
test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + 
return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t 
test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmseq(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmseq(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsge.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsge.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsge.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsge_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) 
{ + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t 
op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool16_t 
test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsge(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmsge(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgeu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgeu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgeu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgeu.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool8_t 
test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t 
op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64( poison, 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsgeu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmsgeu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgt.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, 
vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, 
vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, 
vint8m4_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, 
int16_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64( poison, 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsgt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmsgt(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgtu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgtu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsgtu.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, 
op1, op2, vl); +vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, 
vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, 
vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool32_t 
test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsgtu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmsgtu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsif.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsif.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsif.c @@ -71,64 +71,64 @@ // CHECK-RV64-LABEL: @test_vmsif_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsif.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return vmsif(mask, maskedoff, op1, vl); +vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { + return vmsif(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return vmsif(mask, maskedoff, op1, vl); +vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { + return vmsif(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return vmsif(mask, maskedoff, op1, vl); +vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return vmsif(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return vmsif(mask, maskedoff, op1, vl); +vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return vmsif(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return vmsif(mask, maskedoff, op1, vl); +vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return vmsif(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return vmsif(mask, maskedoff, op1, vl); +vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return vmsif(mask, op1, vl); } // 
CHECK-RV64-LABEL: @test_vmsif_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return vmsif(mask, maskedoff, op1, vl); +vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return vmsif(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsle.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t 
test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsle(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmsle(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsleu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsleu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsleu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsleu.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t 
test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t 
vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool64_t 
test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, 
vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsleu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmsleu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmslt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmslt.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmslt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmslt_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmslt(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, 
vint32m4_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool64_t 
test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return 
vmslt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmslt(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmslt(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsltu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsltu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsltu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsltu.c @@ -405,397 +405,397 @@ // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsltu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, 
vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return 
vmsltu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t 
test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64( poison, 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsltu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmsltu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsne.c @@ -801,793 +801,793 @@ // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t 
test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( 
poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsne_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return 
vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t 
mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsne(mask, maskedoff, 
op1, op2, vl); +vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, 
size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t 
mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsne(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmsne(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsof.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsof.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmsof.c @@ -71,64 +71,64 @@ // CHECK-RV64-LABEL: @test_vmsof_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return vmsof(mask, maskedoff, op1, vl); +vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { + return vmsof(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return vmsof(mask, maskedoff, op1, vl); +vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { + return vmsof(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return vmsof(mask, maskedoff, op1, vl); +vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return vmsof(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return vmsof(mask, maskedoff, op1, vl); +vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return vmsof(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return vmsof(mask, maskedoff, op1, vl); +vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return vmsof(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return vmsof(mask, maskedoff, op1, vl); +vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return vmsof(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return vmsof(mask, maskedoff, op1, vl); +vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return vmsof(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmul.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmul_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmul_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t 
op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); 
+vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t 
op1, vuint32mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmul(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulh.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulh.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulh.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return 
vmulh(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, 
vint8m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, 
int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t 
test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulh_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhsu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhsu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulhsu_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16mf4_t 
test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) { + 
return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) 
{ - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmulhu.c @@ -404,397 +404,397 @@ // 
CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t 
test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t 
op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t 
test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, 
size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t 
vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclip.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, 
vuint16mf4_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint32m2_t 
test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { + return vnclip(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnclipu.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnclipu(mask, op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); 
+vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return 
vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vnclipu(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vncvt.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint16mf4_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint16mf2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint16m1_t src, size_t vl) { + 
return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint16m2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint16m4_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint16m8_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint16m2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint16m4_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint16m8_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) { + return vncvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl); +vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) { + return vncvt_x(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vneg.c @@ -207,199 +207,199 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { + 
return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { + return vneg(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vneg_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); +vint64m8_t test_vneg_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) { + return vneg(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t vd, 
vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( 
[[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsac_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsac_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 
[[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( 
[[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t 
mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t 
test_vnmsub_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vnmsub_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vnmsub_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t vd, 
vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) 
{ @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, 
vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, 
vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnot.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnot.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint16mf4_t 
test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // 
CHECK-RV64-LABEL: @test_vnot_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t 
op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t vl) { + return vnot(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vnot_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t vl) { + return vnot(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsra.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return 
vnsra(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t 
vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t 
test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { + return vnsra(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnsrl.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t 
mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, 
size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vnsrl(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vor.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8mf8_t 
test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t 
mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vor(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vor(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vor(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) 
{ + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + 
return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) 
{ + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + 
return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + 
return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vor_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { 
+ return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, 
vuint16m2_t op1, uint16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); 
+vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) 
{ - return vor(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t 
op1, vuint64m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vor_vx_u64m4_m(vbool16_t 
mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vor(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, 
size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t 
mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, 
size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, 
vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); 
+vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredand(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredand(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c @@ -206,199 +206,199 @@ // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, 
vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmax_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmax(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c @@ -206,199 +206,199 @@ // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, 
vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmaxu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c @@ -206,199 +206,199 @@ // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, 
size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, 
vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmin(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredmin(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c @@ -206,199 +206,199 @@ // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, 
vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, 
vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredminu(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t 
test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - 
return vredor(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + 
return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredor(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: 
@test_vredsum_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t 
maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, 
vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t 
mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return vredsum(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t 
mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, 
vint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, 
vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, 
vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return vredxor(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, 
vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredxor(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return vredxor(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+ return vredxor(mask, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrem.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrem.c
@@ -404,397 +404,397 @@
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrem_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + 
return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, 
vint32m2_t op1, int32_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint64m1_t 
test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vrem(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrem(mask, 
maskedoff, op1, op2, vl);
+vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vrem(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vremu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vremu.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vremu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vremu.c
@@ -404,397 +404,397 @@
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t 
test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t 
maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vremu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c @@ -1071,1063 +1071,1063 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t index, 
size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t 
test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32f16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t 
mask, vfloat32m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t 
mask, vint8mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, 
vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( 
poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16m4_t 
test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t index, size_t vl) { + return vrgather(mask, op1, 
index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, 
size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( 
poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t 
index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t 
test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t index, size_t vl) { + return 
vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t 
test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t index, size_t vl) { + return vrgather(mask, 
op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( poison, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( poison, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); +vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t index, size_t vl) { + return vrgather(mask, op1, index, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgatherei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgatherei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgatherei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgatherei16.c @@ -522,514 +522,514 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, 
vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t 
op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return 
vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, 
maskedoff, op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return 
vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
+ return vrgatherei16(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
+ return vrgatherei16(mask, op1, op2, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrsub.c
@@ -404,397 +404,397 @@
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrsub(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrsub(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vrsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, 
vuint16m1_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - 
return vrsub(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t 
test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vrsub(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsadd.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsadd(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, 
size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t 
vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16mf2_t 
test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, 
int16_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t 
test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vsadd(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsaddu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t 
test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( poison, 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t 
vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) 
{ - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsaddu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c @@ -260,253 +260,253 @@ // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64( poison, 
[[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint8m2_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) { - return 
vsext_vf2(mask, maskedoff, op1, vl); +vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint8m4_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); +vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vsext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); +vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vsext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); +vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vsext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); +vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vsext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); +vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) { + return vsext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl); +vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return vsext_vf8(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl); +vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return vsext_vf8(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl); +vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return vsext_vf8(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl); +vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return vsext_vf8(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint16m1_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint16m2_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint16m4_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); +vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { + return vsext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t 
test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); +vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { + return vsext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); +vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) { + return vsext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); +vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) { + return vsext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint32m1_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint32m2_t 
op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); +vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint32m4_t op1, size_t vl) { + return vsext_vf2(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1down.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, 
vl); +vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { + 
return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, 
vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - 
return vslide1down(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { + return vslide1down(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1up.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslide1up.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( poison, 
[[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, 
vl); +vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint8m4_t 
test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( poison, [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( poison, 
[[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( poison, [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: 
@test_vslide1up_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( poison, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, 
vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( poison, [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { + return vslide1up(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t offset, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) { @@ -162,7 +162,7 
@@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, size_t offset, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, size_t offset, size_t vl) { @@ -486,7 
+486,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) { @@ -540,532 +540,532 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, 
size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat32m4_t 
test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t 
src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) { + return 
vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t 
test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint16m1_t 
test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( 
poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, maskedoff, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, src, offset, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslideup.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], 
i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( 
[[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { @@ 
-396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t 
test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DEST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { @@ 
-855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.mask.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsll.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsll.c @@ -800,793 +800,793 @@ // 
CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, 
vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); 
+vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t 
vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { 
+ return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); 
+vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t 
maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( poison, 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32m1_t 
test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, 
vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vsll(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsmul.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, 
int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, 
vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t 
maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( 
poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsra.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsra_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, 
vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t 
shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( poison, 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { + return vsra(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsrl.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, 
vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, 
vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, 
vl); +vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t 
test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( poison, 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vsrl(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssra.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( 
poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + 
return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return 
vssra(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { + return vssra(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
+ return vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vssra(mask, op1, shift, vl);
}
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
+ return vssra(mask, op1, shift, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssrl.c
@@ -404,397 +404,397 @@
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( poison,
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, 
size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t 
maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( poison, 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssrl(mask, 
maskedoff, op1, shift, vl); +vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssub.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return 
vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t 
vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) 
{ + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vssub(mask, 
maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t 
maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( 
poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vssub(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssubu.c @@ -404,397 +404,397 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( 
poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, 
vuint16m4_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, 
vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vssubu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsub.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16mf2_t 
test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, 
size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t 
test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsub(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwadd.c @@ -548,541 +548,541 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, 
vl); +vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) 
{ + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, 
int32_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwadd_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwadd_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwaddu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwaddu.c @@ -548,541 +548,541 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint16m1_t 
test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { 
- return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return 
vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( poison, 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint64m4_t 
test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwaddu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { + return vwaddu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvt.c @@ -143,136 +143,136 @@ // CHECK-RV64-LABEL: 
@test_vwcvt_x_x_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); +vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { + return vwcvt_x(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvtu.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvtu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwcvtu.c @@ -143,136 +143,136 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t 
src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i16.i64( poison, [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t 
vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i32.i64( poison, [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i64.i64( poison, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return 
vwcvtu_x(mask, maskedoff, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) { + return vwcvtu_x(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t 
vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ 
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccsu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccsu.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccu.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // 
CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], 
i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, 
vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, 
size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccus.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccus.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccus.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmaccus.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vwmaccus_vx_i16m2(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( 
[[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( 
[[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmul.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, 
size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, 
size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, 
int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vwmul(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulsu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulsu.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { + 
return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, 
vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { 
- return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { + return vwmulsu(mask, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwmulu.c @@ -278,271 +278,271 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint8m2_t 
op1, vuint8m2_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t 
test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, 
vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwmulu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c @@ -170,163 +170,163 @@ // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); 
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t 
test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return vwredsum(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c @@ -170,163 +170,163 @@ // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return 
vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsub.c @@ -548,541 +548,541 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) { + return 
vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, 
op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t 
test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, 
size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, 
int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwsub_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vwsub_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwsub_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) { + return vwsub_wx(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsubu.c @@ -548,541 +548,541 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, 
size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint32mf2_t 
test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t 
vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwsubu_vv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t 
op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { + return vwsubu_wv(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vxor.c @@ -800,793 +800,793 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, 
size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vv_i32m4_m(vbool8_t 
mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); 
+vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return 
vxor(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, 
uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, 
vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t 
maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( 
poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vxor(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c @@ -260,253 +260,253 @@ // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t 
test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint8m4_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { + return vzext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { + return vzext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); +vuint32m2_t 
test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { + return vzext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { + return vzext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { + return vzext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { + return vzext_vf8(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { + return vzext_vf8(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { + return vzext_vf8(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { + return vzext_vf8(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint16m4_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { + return vzext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { + return vzext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { + return vzext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { + return vzext_vf4(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint32m4_t op1, size_t vl) { + return vzext_vf2(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 
@@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, 
size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32mf2_t test_vfmadd_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: 
@test_vfmsac_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, 
vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: 
@test_vfmsub_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t 
vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], 
half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: 
@test_vfnmacc_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfnmacc_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], 
half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_tu( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, 
vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: 
@test_vfnmadd_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfnmadd_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: 
@test_vfnmsac_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vfnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( 
[[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // 
CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_tu( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, 
vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], 
double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, 
vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfwmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_ta(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_ta(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_ta(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_ta(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_ta(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_ta(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_ta(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_ta(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // 
CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_ta(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_ta(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_ta(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_ta(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_ta(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_ta(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_ta(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m8_t test_vfwmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_ta(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_ta(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_ta(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_ta(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_ta(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_ta(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_ta(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_ta(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_ta(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ 
-279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_ta(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_ta(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_ta(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_ta(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_ta(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_ta(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t 
vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_ta(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_ta(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_ta(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_ta(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_ta(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_ta(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_ta(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: 
@test_vfwnmacc_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_ta(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_ta(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_ta(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_ta(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_ta(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_ta(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_ta(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_ta(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: 
@test_vfwnmsac_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], 
float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_ta(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_ta(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_ta(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_ta(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_ta(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_ta(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_ta(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_ta(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_ta(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_ta(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_ta(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_ta(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_ta(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_ta(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_ta(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( 
[[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint8m8_t test_vmacc_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vmacc_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vmacc_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vmacc_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_tu(vuint8mf4_t 
vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ 
-504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 
+567,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { 
@@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, 
size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t 
vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_ta(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_ta(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ 
-819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_ta(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_ta(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_ta(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_ta(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_ta(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_ta(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_ta(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_ta(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_ta(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_ta(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_ta(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_ta(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_ta(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_ta(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_ta(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_ta(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_ta(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_ta(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_ta(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_ta(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_ta(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_ta(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_ta(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_ta(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_ta(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_ta(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_ta(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_ta(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_ta(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_ta(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_ta(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_ta(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_ta(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_ta(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_ta(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_ta(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_ta(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_ta(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_ta(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_ta(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_ta(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_ta(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_ta(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_ta(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_ta(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_ta(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_ta(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_ta(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_ta(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_ta(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_ta(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_ta(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_ta(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_ta(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_ta(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_ta(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_ta(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_ta(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_ta(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_ta(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_ta(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_ta(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_ta(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_ta(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_ta(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_ta(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_ta(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_ta(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_ta(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_ta(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_ta(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_ta(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_ta(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: 
@test_vmacc_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_ta(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 
2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vmadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, 
vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, 
size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, 
vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_tu(vuint32m2_t vd, 
vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vmadd_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vmadd_vv_i8mf8_ta(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_ta(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_ta(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_ta(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_ta(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_ta(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_ta(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_ta(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { 
@@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_ta(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_ta(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_ta(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_ta(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_ta(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_ta(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_ta(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_ta(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_ta(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_ta(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_ta(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_ta(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_ta(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_ta(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_ta( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_ta(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_ta(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_ta(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_ta(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_ta(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_ta(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_ta(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_ta(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_ta(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_ta(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_ta(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_ta(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_ta(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_ta(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_ta(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_ta(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_ta(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_ta(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_ta(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_ta(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_ta(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_ta(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_ta(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_ta(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_ta(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_ta(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_ta(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_ta(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_ta(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_ta(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_ta(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_ta(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_ta(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_ta(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_ta(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_ta(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_ta(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_ta(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_ta(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_ta(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_ta(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_ta(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_ta(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_ta(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_ta(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_ta(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: 
@test_vmadd_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_ta(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_ta(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_ta(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // 
CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_ta(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_ta(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_ta(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ 
-1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_ta(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_ta(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], 
i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( 
[[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_tu( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // 
CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t 
vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_ta(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_ta(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_ta(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_ta(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_ta(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_ta(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // 
CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_ta(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_ta(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_ta(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_ta(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_ta(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_ta(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_ta(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_ta(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_ta(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_ta(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_ta(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_ta(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_ta(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_ta(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsac_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_ta(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_ta(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_ta(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_ta(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_ta(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_ta(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 
+1044,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_ta(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_ta(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_ta(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_ta(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_ta(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_ta(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t 
vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_ta(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_ta(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_ta(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_ta(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_ta(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_ta(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_ta(vint64m4_t vd, vint64m4_t vs1, 
vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_ta(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_ta(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_ta(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_ta(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_ta(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_ta(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_ta(vuint8mf4_t vd, uint8_t 
rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_ta(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_ta(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_ta(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_ta(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_ta(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_ta(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_ta(vuint8m4_t vd, vuint8m4_t vs1, 
vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_ta(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_ta(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_ta(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_ta(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_ta(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_ta(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vnmsac_vx_u16mf2_ta(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_ta(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_ta(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_ta(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_ta(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_ta(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_ta(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 
3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_ta(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_ta(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_ta(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( 
[[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_ta(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_ta(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_ta(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_ta(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_ta(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_ta(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_ta(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vnmsub_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, 
vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_tu(vint16m2_t vd, 
int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vnmsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m8_t test_vnmsub_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m8_t test_vnmsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vnmsub_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vnmsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_ta(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_ta(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_ta(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_ta(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_ta(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_ta(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_ta(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_ta(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_ta(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_ta(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_ta(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_ta(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_ta(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_ta(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_ta(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_ta(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_ta(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_ta(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_ta(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_ta(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_ta(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_ta(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_ta(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_ta(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_ta(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_ta(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_ta(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_ta(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_ta(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_ta(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_ta(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_ta(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_ta(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_ta(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_ta(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_ta(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_ta(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_ta(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_ta(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_ta(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_ta(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_ta(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_ta(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_ta(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_ta(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_ta(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_ta(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_ta(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_ta(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_ta(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_ta(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_ta(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_ta(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_ta(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_ta(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_ta(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_ta(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_ta(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_ta(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_ta(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_ta(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_ta(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_ta(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_ta(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_ta(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_ta(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_ta(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_ta(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsub_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_ta(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_ta(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_ta(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_ta(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) 
{ @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_ta(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_ta(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_ta(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_ta(vuint64m8_t vd, 
vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_ta(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], 
i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vslidedown_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_ta(vfloat16mf4_t src, size_t offset, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2_ta(vfloat16mf2_t src, size_t offset, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_ta(vfloat16m1_t src, size_t offset, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_ta(vfloat16m2_t src, size_t offset, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_ta(vfloat16m4_t src, size_t offset, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_ta(vfloat16m8_t src, size_t offset, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t src, size_t offset, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_ta(vfloat32m1_t src, size_t offset, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_ta(vfloat32m2_t src, size_t offset, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_ta(vfloat32m4_t src, size_t offset, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_ta(vfloat32m8_t src, size_t offset, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_ta(vfloat64m1_t src, size_t offset, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_ta(vfloat64m2_t src, size_t offset, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_ta(vfloat64m4_t src, size_t offset, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_ta(vfloat64m8_t src, size_t offset, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_ta(vint8mf8_t src, size_t offset, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_ta(vint8mf4_t src, size_t offset, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_ta(vint8mf2_t src, size_t offset, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_ta(vint8m1_t src, size_t offset, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_ta(vint8m2_t src, size_t offset, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_ta(vint8m4_t src, size_t offset, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_ta(vint8m8_t src, size_t offset, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_ta(vint16mf4_t src, size_t offset, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_ta(vint16mf2_t src, size_t offset, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_ta(vint16m1_t src, size_t offset, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_ta(vint16m2_t src, size_t offset, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_ta(vint16m4_t src, size_t offset, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_ta(vint16m8_t src, size_t offset, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t src, size_t offset, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_ta(vint32m1_t src, size_t offset, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_ta(vint32m2_t src, size_t offset, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_ta(vint32m4_t src, size_t offset, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vslidedown_vx_i32m8_ta(vint32m8_t src, size_t offset, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_ta(vint64m1_t src, size_t offset, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_ta(vint64m2_t src, size_t offset, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_ta(vint64m4_t src, size_t offset, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_ta(vint64m8_t src, size_t offset, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_ta(vuint8mf8_t src, size_t offset, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_ta(vuint8mf4_t src, size_t offset, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_ta(vuint8mf2_t src, size_t offset, 
size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_ta(vuint8m1_t src, size_t offset, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_ta(vuint8m2_t src, size_t offset, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_ta(vuint8m4_t src, size_t offset, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_ta(vuint8m8_t src, size_t offset, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_ta(vuint16mf4_t src, size_t offset, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_ta(vuint16mf2_t src, size_t offset, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_ta(vuint16m1_t src, size_t offset, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: 
@test_vslidedown_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_ta(vuint16m2_t src, size_t offset, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_ta(vuint16m4_t src, size_t offset, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_ta(vuint16m8_t src, size_t offset, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t src, size_t offset, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_ta(vuint32m1_t src, size_t offset, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_ta(vuint32m2_t src, size_t offset, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_ta(vuint32m4_t src, size_t offset, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_ta(vuint32m8_t src, size_t offset, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_ta(vuint64m1_t src, size_t offset, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_ta(vuint64m2_t src, size_t offset, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_ta(vuint64m4_t src, size_t offset, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_ta(vuint64m8_t src, size_t offset, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslideup.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t maskedoff, 
vfloat32m1_t src, size_t offset, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_tu( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { @@ -378,7 +378,7 @@ // 
CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslideup.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_ta(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vslideup_vx_f16mf2_ta(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_ta(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_ta(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_ta(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_ta(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_ta(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_ta(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_ta(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_ta(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_ta(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_ta(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_ta(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_ta(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_ta(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_ta(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_ta(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_ta(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_ta(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_ta(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_ta(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_ta(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_ta(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_ta(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_ta(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_ta(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_ta(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_ta(vint32mf2_t dest, vint32mf2_t 
src, size_t offset, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_ta(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_ta(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_ta(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_ta(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_ta(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_ta(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_ta(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_ta(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_ta(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_ta(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_ta(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_ta(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_ta(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_ta(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_ta(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_ta(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_ta(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_ta(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_ta(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_ta(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( 
[[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_ta(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_ta(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_ta(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_ta(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_ta(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_ta(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_u64m2_ta(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2_ta(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
@@ -1053,7 +1053,7 @@
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_ta(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4_ta(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
@@ -1062,7 +1062,7 @@
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_ta(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8_ta(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmacc.c
@@ -9,7 +9,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
@@ -18,7 +18,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
@@ -27,7 +27,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
@@ -36,7 +36,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // 
CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m2_t test_vwmacc_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_ta(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_ta(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_ta(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_ta(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_ta(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_ta(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_ta(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: 
@test_vwmacc_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_ta(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_ta(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_ta(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_ta(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_ta(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_ta(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vwmacc_vx_i32mf2_ta(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_ta(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_ta(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_ta(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_ta(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_ta(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_ta(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_ta(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_ta(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_ta(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_ta(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_ta(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_ta(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_ta(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_ta(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_ta(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_ta(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccsu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccsu.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -99,7 
+99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t 
rs1, vuint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_ta(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_ta(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_ta(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_ta(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_ta(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_ta(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_ta(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_ta(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_ta(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_ta(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_ta(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_ta(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_ta(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_ta(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_ta(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_ta(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_ta(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_ta(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_ta(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_ta(vint32m4_t vd, 
int16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_ta(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_ta(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_ta(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_ta(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_ta(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_ta(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_ta(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_ta(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_ta(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_ta(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccu.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_tu(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_tu(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_tu(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_tu(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_tu(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_tu(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_tu(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_tu(vuint32m1_t vd, uint16_t 
rs1, vuint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_tu(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_tu(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_tu(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_tu(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_tu(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_ta(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_ta(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_ta(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_ta(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_ta(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_ta(vuint16m1_t vd, uint8_t 
rs1, vuint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_ta(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_ta(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_ta(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_ta(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_ta(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_ta(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_ta(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_ta(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_ta(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_ta(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_ta(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_ta(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_ta(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_ta(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_ta(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_ta(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_ta(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_ta(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_ta(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_ta(vuint64m2_t vd, 
uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_ta(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_ta(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_ta(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_ta(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccus.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccus.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccus.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccus.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_tu(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_tu(vint16mf2_t vd, 
uint8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_tu(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_tu(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_tu(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_tu(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_tu(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_tu(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_tu(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_tu(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_tu(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_tu(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_tu(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( 
[[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_ta(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_ta(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_ta(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_ta(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_ta(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_ta(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_ta(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccus_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_ta(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_ta(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_ta(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_ta(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_ta(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_ta(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_ta(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_ta(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_tu(vfloat16m1_t vd, 
vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // 
CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_ta(vfloat64m1_t vd, 
vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, 
size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: 
@test_vfmadd_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_ta(vfloat64m1_t vd, double rs1, 
vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_tu( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ 
-72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: 
@test_vfmsac_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, 
vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_tu( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 
+81,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: 
@test_vfmsub_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_ta(vfloat64m2_t vd, double rs1, 
vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: 
@test_vfnmacc_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_tu(vfloat16m4_t vd, 
vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], 
double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t 
vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: 
@test_vfnmadd_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ 
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_tu( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ 
-99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m2_t test_vfnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( 
[[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, 
size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m8_t test_vfnmsac_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( 
[[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( 
[[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_tu( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_ta(vfloat16mf4_t vd, vfloat16mf4_t vs1, 
vfloat16mf4_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_ta(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_ta(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_ta(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_ta(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_ta(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_ta(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_ta(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_ta(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_ta(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_ta(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_ta(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_ta(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_ta(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_ta(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_ta(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_ta(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_ta(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_ta(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_ta(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_ta(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_ta(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_ta(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_ta(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_ta(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_ta(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_ta(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[VD:%.*]], double [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_ta(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_ta(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_ta(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_ta(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_ta(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_ta(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ 
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_ta(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_ta(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_ta(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_ta(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_ta(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_ta(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_ta(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_ta(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_ta(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_ta(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_tu( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_ta(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_ta(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_ta(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_ta(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_ta(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_ta(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_ta(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_ta(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ 
-252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_ta(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_ta(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_ta(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_ta(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( 
[[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_ta(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_ta(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_ta(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tu( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_ta(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_ta(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_ta(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_ta(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_ta(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_ta(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_ta(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_ta(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_ta(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m1_t test_vfwnmacc_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_ta(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_ta(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_ta(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_ta(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_ta(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_ta(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m2_t test_vfwnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_ta(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_ta(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_ta(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_ta(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_ta(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_ta(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_ta(vfloat32m4_t vd, _Float16 vs1, 
vfloat16m2_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_ta(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_ta(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_ta(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_ta(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_ta(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_ta(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_ta(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_ta(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m2_t test_vmacc_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_tu(vint16mf2_t vd, 
vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, 
vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t 
vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ 
-351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // 
CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_tu( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_tu( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: 
@test_vmacc_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // 
CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // 
CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_ta(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_ta(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_ta(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_ta(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_ta(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_ta(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_ta(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_ta(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_ta(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_ta(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_ta(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_ta(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_ta(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_ta(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_ta(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_ta(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_ta(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_ta(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_ta(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_ta(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_ta(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_ta(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_ta(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_ta(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_ta(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_ta(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_ta(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_ta(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_ta(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_ta(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_ta(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_ta(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_ta(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_ta(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_ta(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_ta(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_ta(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_ta(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_ta(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_ta(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_ta(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_ta(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_ta(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_ta(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_ta(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_ta(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_ta(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_ta(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_ta(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_ta(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_ta(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_ta(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_ta(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( 
[[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_ta(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_ta(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_ta(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_ta(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_ta(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_ta(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_ta(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_ta(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_ta(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_ta(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_ta(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_ta(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_ta(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_ta(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_ta(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_ta(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_ta(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_ta(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_ta(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_ta(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: 
@test_vmacc_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_ta(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_ta(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_ta(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmadd.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vmadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t 
vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_tu(vuint16m2_t vd, 
vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m8_t test_vmadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint64m4_t test_vmadd_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_ta(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_ta(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_ta(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_ta(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vmadd_vv_i8mf2_ta(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_ta(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_ta(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_ta(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_ta(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_ta(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_ta(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_ta(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 
+909,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_ta(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_ta(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_ta(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_ta(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_ta(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_ta(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_ta(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // 
CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_ta(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_ta(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_ta(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_ta(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_ta(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_ta(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_ta(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // 
CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_ta(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_ta(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_ta(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_ta(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_ta(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // 
CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_ta(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_ta(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_ta(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_ta(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_ta(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_ta(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_ta(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // 
CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_ta(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_ta(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_ta(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_ta(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_ta(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_ta(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_ta(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: 
@test_vmadd_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_ta(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_ta(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_ta(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_ta(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_ta(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_ta(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_ta(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_ta( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_ta(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_ta(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_ta(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_ta(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_ta(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_ta(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_ta(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_ta( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_ta(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_ta(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_ta(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_ta(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_ta(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_ta(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_ta(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_ta( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_ta(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_ta(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_ta(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // 
CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_ta(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_ta(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_ta(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_ta(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ 
-1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_ta(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_ta(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_ta(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsac.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], 
i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], 
i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], 
i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_tu( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_ta(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_ta(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_ta(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_ta( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_ta(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_ta(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_ta(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_ta(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_ta(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_ta(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_ta(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_ta(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_ta(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_ta(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_ta(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_ta(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_ta(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_ta(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_ta(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_ta(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_ta(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_ta(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_ta(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_ta(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_ta(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_ta(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_ta(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_ta(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_ta(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_ta(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_ta(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_ta(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_ta(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_ta(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_ta(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_ta(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_ta(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsac_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_ta(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_ta(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_ta(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_ta(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_ta(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_ta(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_ta(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // 
CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_ta(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_ta(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_ta(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_ta(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_ta(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_ta(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_ta(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // 
CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_ta(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_ta(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_ta(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_ta(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_ta(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_ta(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_ta(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // 
CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_ta(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_ta(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_ta(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_ta(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_ta(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_ta(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_ta(vuint16m2_t vd, uint16_t rs1, 
vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_ta(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_ta(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_ta(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_ta(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m1_t test_vnmsac_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_ta(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_ta(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_ta(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_ta(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_ta(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_ta(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_ta(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_ta(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vnmsub.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t 
vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_tu( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: 
@test_vnmsub_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // 
CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -306,7 
+306,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ 
-369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ 
-432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // 
CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -558,7 
+558,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, 
vuint16m4_t vs2, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vnmsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_ta(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_ta(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_ta(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_ta(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_ta(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_ta(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_ta(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_ta(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_ta(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_ta(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_ta(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_ta(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_ta(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_ta(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vnmsub_vv_i16mf4_ta(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_ta(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_ta(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_ta(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_ta(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_ta(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_ta(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_ta(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_ta(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_ta(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_ta(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_ta(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], 
i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_ta(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_ta(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_ta(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -1080,7 +1080,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_ta(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_ta(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_ta(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_ta(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { @@ -1116,7 +1116,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_ta(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { @@ -1125,7 +1125,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_ta(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { @@ -1134,7 +1134,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_ta(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { @@ -1143,7 +1143,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_ta(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { @@ -1152,7 +1152,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_ta(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { @@ -1161,7 +1161,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_ta(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { @@ -1170,7 +1170,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_ta(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_ta(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_ta(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_ta(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_ta(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -1215,7 +1215,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_ta(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_ta(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -1233,7 +1233,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_ta(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -1242,7 +1242,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[VD:%.*]], 
i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_ta(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_ta(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -1260,7 +1260,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_ta(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_ta(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_ta(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_ta(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -1296,7 +1296,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_ta(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -1305,7 +1305,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_ta(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_ta(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { @@ -1323,7 +1323,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_ta(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -1332,7 +1332,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_ta(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -1341,7 +1341,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_ta(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_ta(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_ta(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv4i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_ta(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_ta(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -1386,7 +1386,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_ta(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -1395,7 +1395,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_ta(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -1404,7 +1404,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_ta(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_ta(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { @@ -1422,7 +1422,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_ta(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { @@ -1431,7 +1431,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -1440,7 +1440,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_ta(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -1476,7 +1476,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_ta(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -1494,7 +1494,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_ta(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -1503,7 +1503,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { @@ -1512,7 +1512,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_ta(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { @@ -1521,7 +1521,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { @@ -1530,7 +1530,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_ta(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_ta(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_ta( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { @@ -1566,7 +1566,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_ta(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { @@ -1575,7 +1575,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[VD:%.*]], i64 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_ta(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 
[[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tu( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vslidedown_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_ta(vfloat16mf4_t src, size_t offset, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vslidedown_vx_f16mf2_ta(vfloat16mf2_t src, size_t offset, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_ta(vfloat16m1_t src, size_t offset, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_ta(vfloat16m2_t src, size_t offset, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_ta(vfloat16m4_t src, size_t offset, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_ta(vfloat16m8_t src, size_t offset, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t src, size_t offset, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_ta(vfloat32m1_t src, size_t offset, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vslidedown_vx_f32m2_ta(vfloat32m2_t src, size_t offset, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_ta(vfloat32m4_t src, size_t offset, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_ta(vfloat32m8_t src, size_t offset, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_ta(vfloat64m1_t src, size_t offset, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_ta(vfloat64m2_t src, size_t offset, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_ta(vfloat64m4_t src, size_t offset, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_ta(vfloat64m8_t src, size_t offset, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vslidedown_vx_i8mf8_ta(vint8mf8_t src, size_t offset, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_ta(vint8mf4_t src, size_t offset, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_ta(vint8mf2_t src, size_t offset, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_ta(vint8m1_t src, size_t offset, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_ta(vint8m2_t src, size_t offset, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_ta(vint8m4_t src, size_t offset, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_ta(vint8m8_t src, size_t offset, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_ta(vint16mf4_t src, size_t offset, size_t vl) { @@ 
-747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_ta(vint16mf2_t src, size_t offset, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_ta(vint16m1_t src, size_t offset, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_ta(vint16m2_t src, size_t offset, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_ta(vint16m4_t src, size_t offset, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_ta(vint16m8_t src, size_t offset, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t src, size_t offset, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_ta(vint32m1_t src, size_t offset, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: 
@test_vslidedown_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_ta(vint32m2_t src, size_t offset, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_ta(vint32m4_t src, size_t offset, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_ta(vint32m8_t src, size_t offset, size_t vl) { @@ -837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_ta(vint64m1_t src, size_t offset, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_ta(vint64m2_t src, size_t offset, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_ta(vint64m4_t src, size_t offset, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_ta(vint64m8_t src, size_t offset, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_ta(vuint8mf8_t src, size_t offset, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_ta(vuint8mf4_t src, size_t offset, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_ta(vuint8mf2_t src, size_t offset, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_ta(vuint8m1_t src, size_t offset, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_ta(vuint8m2_t src, size_t offset, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_ta(vuint8m4_t src, size_t offset, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_ta(vuint8m8_t src, size_t offset, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_ta(vuint16mf4_t src, size_t offset, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_ta(vuint16mf2_t src, size_t offset, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_ta(vuint16m1_t src, size_t offset, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_ta(vuint16m2_t src, size_t offset, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_ta(vuint16m4_t src, size_t offset, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_ta(vuint16m8_t src, size_t offset, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t src, size_t offset, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_ta(vuint32m1_t src, size_t offset, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_ta(vuint32m2_t src, size_t offset, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_ta(vuint32m4_t src, size_t offset, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_ta(vuint32m8_t src, size_t offset, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_ta(vuint64m1_t src, size_t offset, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_ta(vuint64m2_t src, size_t offset, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_ta(vuint64m4_t src, size_t offset, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_ta(vuint64m8_t src, size_t offset, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslideup.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vslideup.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: 
@test_vslideup_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 
[[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t 
offset, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_tu( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_ta(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_ta(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_ta(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { @@ -567,7 +567,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_ta(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { @@ -576,7 +576,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_ta(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { @@ -585,7 +585,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t 
test_vslideup_vx_f16m8_ta(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_ta(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_ta(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { @@ -621,7 +621,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_ta(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { @@ -630,7 +630,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_ta(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_ta(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_ta(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_ta(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_ta(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_ta(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_ta(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_ta(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_ta(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_ta(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_ta(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_ta(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_ta(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_ta(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_ta(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_ta(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_ta(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_ta(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_ta(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_ta(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_ta(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_ta(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_ta(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { @@ 
-837,7 +837,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_ta(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { @@ -846,7 +846,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_ta(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { @@ -855,7 +855,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_ta(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { @@ -864,7 +864,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_ta(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { @@ -873,7 +873,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_ta(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { @@ -882,7 +882,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_ta(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { @@ -891,7 +891,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vslideup_vx_u8mf2_ta(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { @@ -900,7 +900,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_ta(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_ta(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_ta(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { @@ -927,7 +927,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_ta(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { @@ -936,7 +936,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_ta(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_ta(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DEST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_ta(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_ta(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_ta(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { @@ -981,7 +981,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_ta(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { @@ -990,7 +990,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_ta(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_ta(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { @@ -1017,7 +1017,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_ta(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_ta(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { @@ -1035,7 +1035,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_ta(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { @@ -1044,7 +1044,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_ta(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { @@ -1053,7 +1053,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_ta(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { @@ -1062,7 +1062,7 @@ // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_ta(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmacc.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // 
CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m8_t test_vwmacc_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_ta(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_ta(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_ta(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_ta(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_ta(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_ta(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_ta(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_ta(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_ta(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_ta(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_ta(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_ta(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_ta(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_ta(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_ta(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_ta(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_ta(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_ta(vint32m2_t vd, int16_t rs1, 
vint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_ta(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_ta(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_ta(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_ta(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_ta(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_ta(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_ta(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_ta(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_ta(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_ta(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_ta(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_ta(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccsu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccsu.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint16mf4_t test_vwmaccsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vwmaccsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_ta(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_ta(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_ta(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_ta(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_ta( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_ta(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_ta(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_ta(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_ta(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_ta(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_ta(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vwmaccsu_vv_i16m8_ta(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_ta(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_ta(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_ta(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_ta(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_ta(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_ta(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_ta(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_ta(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_ta(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_ta(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_ta(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_ta(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_ta(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccsu_vv_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_ta(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_ta(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_ta(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_ta(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_ta(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_ta(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccu.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: 
@test_vwmaccu_vv_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_tu(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_tu(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_tu(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m2_t test_vwmaccu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_tu(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_tu(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_tu(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_tu(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_tu(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_tu(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_tu(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_tu( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_tu(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_tu(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vwmaccu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_tu(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_ta(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_ta(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_ta(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_ta(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_ta(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_ta(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_ta(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_ta(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_ta(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_ta(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_ta(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_ta(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_ta(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_ta(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { @@ -405,7 +405,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_ta(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { @@ -414,7 +414,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_ta(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_ta(vuint32m2_t vd, 
vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_ta(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_ta(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_ta(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_ta(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_ta(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_ta(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { @@ -486,7 +486,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_ta(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_ta(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_ta(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { @@ -513,7 +513,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_ta(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_ta(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { @@ -531,7 +531,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_ta(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { @@ -540,7 +540,7 @@ // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_ta(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccus.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccus.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccus.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwmaccus.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_tu(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_tu(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_tu(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_tu(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_tu(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_tu(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_tu(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_tu(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_tu(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_tu(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_tu(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_tu(vint64m2_t vd, uint32_t rs1, 
vint32m1_t vs2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_tu(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_ta(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_ta(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_ta(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_ta(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[VD:%.*]], i8 
[[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_ta(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[VD:%.*]], i8 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_ta(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_ta(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_ta(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_ta(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_ta(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[VD:%.*]], i16 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_ta(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( 
[[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_ta(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_ta(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_ta(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_ta(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64-overloaded.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64-overloaded.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64-overloaded.c @@ -1,8 +1,8 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s -// NOTE: This test file contains eew=64 of vmulh, vmulhu, vmulhsu. -// NOTE: The purpose of separating these 3 instructions from vmul.c is that +// NOTE: This test file contains eew=64 of vmulh. +// NOTE: The purpose of separating this instruction from vmulh.c is that // eew=64 versions only enable when V extension is specified.
(Not for zve) #include @@ -81,72 +81,72 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m4_t 
test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64.c @@ -2,7 +2,7 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s // NOTE: This test file contains eew=64 of vmulh. -// NOTE: The purpose of separating this instructions from vmul.c is that +// NOTE: The purpose of separating this instruction from vmulh.c is that // eew=64 versions only enable when V extension is specified.
(Not for zve) #include @@ -81,74 +81,74 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmulh_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmulh_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, 
vint64m4_t op2, size_t vl) { - return vmulh_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmulh_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmulh_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-overloaded.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-overloaded.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-overloaded.c @@ -332,324 +332,324 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulh_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmulh(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmulh(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, 
int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh(mask, 
maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t 
maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( 
poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmulh(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh.c @@ -332,324 +332,324 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmulh_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmulh_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t 
test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmulh_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmulh_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t 
maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmulh_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmulh_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmulh_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmulh_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmulh_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmulh_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmulh_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return 
vmulh_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmulh_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, 
vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmulh_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmulh_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmulh_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmulh_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64-overloaded.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64-overloaded.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64-overloaded.c @@ -1,8 +1,8 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s -// NOTE: This test file contains eew=64 of vmulh, vmulhu, vmulhsu. -// NOTE: The purpose of separating these 3 instructions from vmul.c is that +// NOTE: This test file contains eew=64 of vmulhsu. 
+// NOTE: The purpose of separating this instruction from vmulhsu.c is that // eew=64 versions only enable when V extension is specified. (Not for zve) #include @@ -79,75 +79,74 @@ return vmulhsu(op1, op2, vl); } - // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64.c @@ -1,8 +1,8 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s -// NOTE: This test file contains eew=64 of vmulh, vmulhu, vmulhsu. -// NOTE: The purpose of separating these 3 instructions from vmul.c is that +// NOTE: This test file contains eew=64 of vmulhsu. +// NOTE: The purpose of separating this instruction from vmulhsu.c is that // eew=64 versions only enable when V extension is specified.
(Not for zve) #include @@ -81,72 +81,72 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhsu_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhsu_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t 
test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhsu_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhsu_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-overloaded.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-overloaded.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-overloaded.c @@ -332,324 +332,324 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); 
+vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t 
mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t 
vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, 
uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu.c @@ -332,324 +332,324 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhsu_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t 
test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhsu_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhsu_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhsu_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhsu_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhsu_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhsu_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhsu_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return 
vmulhsu_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhsu_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhsu_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t 
test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhsu_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhsu_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhsu_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t 
test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhsu_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhsu_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhsu_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64-overloaded.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64-overloaded.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64-overloaded.c @@ -1,8 +1,8 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s -// NOTE: This test file contains eew=64 of vmulh, vmulhu, vmulhsu. -// NOTE: The purpose of separating these 3 instructions from vmul.c is that +// NOTE: This test file contains eew=64 of vmulhu. +// NOTE: The purpose of separating this instruction from vmulhu.c is that // eew=64 versions only enable when V extension is specified. (Not for zve) #include <riscv_vector.h> @@ -81,72 +81,72 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, 
vuint64m2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, 
uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64.c @@ -1,8 +1,8 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s -// NOTE: This test file contains eew=64 of vmulh, vmulhu, vmulhsu. -// NOTE: The purpose of separating these 3 instructions from vmul.c is that +// NOTE: This test file contains eew=64 of vmulhu. +// NOTE: The purpose of separating this instruction from vmulhu.c is that // eew=64 versions only enable when V extension is specified. (Not for zve) #include <riscv_vector.h> @@ -81,72 +81,72 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8_m(mask, maskedoff, op1, op2, 
vl); +vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-overloaded.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-overloaded.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-overloaded.c @@ -332,324 +332,324 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulhu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t 
vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, 
vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu.c @@ -332,324 +332,324 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( poison, 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, 
size_t vl) { + return vmulhu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t 
mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { 
+ return vmulhu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64-overloaded.c @@ -80,80 +80,72 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vsmul(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c @@ -80,80 +80,72 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsmul_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsmul_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { - 
return vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsmul_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsmul_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m8_m(mask, op1, op2, vl); }
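
Note on the pattern that repeats through these test updates: because the masked (_m) intrinsics now default to a tail-agnostic, mask-agnostic policy, the maskedoff merge operand is dropped from the C signature, the IR passthrough operand becomes poison, and the trailing policy immediate changes from 0 to 3 (assuming the usual LLVM policy-operand encoding, where bit 0 means tail agnostic and bit 1 means mask agnostic). Below is a minimal caller sketch against the updated signature; the wrapper name mulhu_active is hypothetical, the intrinsic vmulhu_vv_u32m1_m is taken from this patch, and it assumes <riscv_vector.h> with a V-enabled -march.

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical helper: for each active element (mask bit set), return the
// upper 32 bits of the 64-bit unsigned product a[i] * b[i]. Inactive and
// tail elements are agnostic, so no maskedoff merge value is supplied.
vuint32m1_t mulhu_active(vbool32_t mask, vuint32m1_t a, vuint32m1_t b,
                         size_t vl) {
  // Updated masked form: (mask, op1, op2, vl) -- no merge operand.
  return vmulhu_vv_u32m1_m(mask, a, b, vl);
}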